diff --git a/.backportrc.json b/.backportrc.json index f5d83a8c539d4..68cde83e72285 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -3,9 +3,8 @@ "targetBranchChoices": [ "master", "8.0", + "7.17", "7.16", - "7.15", - "7.14", "6.8" ], "targetPRLabels": ["backport"], diff --git a/.ci/bwcVersions b/.ci/bwcVersions index d6f7dbed03b08..2e9efc96c1af2 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -44,7 +44,11 @@ BWC_VERSION: - "7.15.0" - "7.15.1" - "7.15.2" - - "7.15.3" - "7.16.0" + - "7.16.1" + - "7.16.2" + - "7.16.3" + - "7.16.4" + - "7.17.0" - "8.0.0" - "8.1.0" diff --git a/.ci/java-versions-aarch64.properties b/.ci/java-versions-aarch64.properties index 5486287ed0b59..1aa2b0cd12f13 100644 --- a/.ci/java-versions-aarch64.properties +++ b/.ci/java-versions-aarch64.properties @@ -4,5 +4,5 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -ES_BUILD_JAVA=jdk16 +ES_BUILD_JAVA=jdk17 ES_RUNTIME_JAVA=jdk17 diff --git a/.ci/java-versions-fips.properties b/.ci/java-versions-fips.properties index 1e6dc4d241675..81c3f36298a75 100644 --- a/.ci/java-versions-fips.properties +++ b/.ci/java-versions-fips.properties @@ -4,5 +4,5 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -ES_BUILD_JAVA=openjdk16 +ES_BUILD_JAVA=openjdk17 ES_RUNTIME_JAVA=openjdk17 diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index c1dc281d79eb0..8a0c526d414f5 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -4,5 +4,5 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -ES_BUILD_JAVA=openjdk16 +ES_BUILD_JAVA=openjdk17 ES_RUNTIME_JAVA=openjdk17 diff --git a/.ci/jobs.t/defaults.yml b/.ci/jobs.t/defaults.yml index fa59584cefeed..2a6512dc020b4 100644 --- a/.ci/jobs.t/defaults.yml +++ b/.ci/jobs.t/defaults.yml @@ -53,6 +53,7 @@ url: https://github.com/elastic/elasticsearch/ - inject: properties-content: | + COMPOSE_HTTP_TIMEOUT=120 JOB_BRANCH=%BRANCH% HOME=$JENKINS_HOME GRADLEW=./gradlew --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-upgrade.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-upgrade.yml index f401fcac96f15..bbff0960555c7 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-upgrade.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-upgrade.yml @@ -28,4 +28,4 @@ JAVA16_HOME=$HOME/.java/openjdk16 - shell: | #!/usr/local/bin/runbld --redirect-stderr - ./.ci/os.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructiveDistroUpgradeTest.v$BWC_VERSION + ./.ci/os.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v$BWC_VERSION diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml index ee2d1b57cac2d..7df53cb6bfb2b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml @@ -25,6 +25,7 @@ sudo cryptsetup open --key-file key.secret "$LOOP" secret --verbose sudo mkfs.ext2 /dev/mapper/secret sudo mkdir /mnt/secret + # Change /mnt/secret with care (at least a 
test uses this path to detect when encryption at rest is used) sudo mount /dev/mapper/secret /mnt/secret sudo chown -R jenkins /mnt/secret cp -r "$WORKSPACE" /mnt/secret diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark.yml new file mode 100644 index 0000000000000..638e553a41396 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark.yml @@ -0,0 +1,49 @@ +--- +- job: + name: "elastic+elasticsearch+pull-request+build-benchmark" + display-name: "elastic / elasticsearch - pull request build benchmark" + description: "Testing of Elasticsearch pull requests - build benchmark" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+build-bench" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + properties: + - inject: + properties-content: | + BUILD_PERFORMANCE_TEST=true + COMPOSE_HTTP_TIMEOUT=120 + JOB_BRANCH=%BRANCH% + HOME=$JENKINS_HOME + GRADLEW=./gradlew --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ + GRADLEW_BAT=./gradlew.bat --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' + github-hooks: true + status-context: elasticsearch-ci/build-benchmark + cancel-builds-on-update: true + black-list-target-branches: + - 6.8 + excluded-regions: + - ^docs/.* + white-list-labels: + - 'build-benchmark' + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests + $WORKSPACE/.ci/scripts/install-gradle-profiler.sh + $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-tool-update.scenarios --project-dir . 
--output-dir profile-out + tar -czf build/${BUILD_NUMBER}.tar.bz2 profile-out \ No newline at end of file diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml index 6bd8f4253bd1e..f9be84bd5f6c7 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml @@ -18,8 +18,11 @@ github-hooks: true status-context: elasticsearch-ci/example-plugins cancel-builds-on-update: true - white-list-labels: - - ':Delivery/Build' + included-regions: + - build-conventions/.* + - build-tools/.* + - build-tools-internal/.* + - plugins/examples/.* builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index a7fbaec6fdd02..f99a3c1bdd32c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -19,6 +19,7 @@ status-context: elasticsearch-ci/rest-compatibility cancel-builds-on-update: true black-list-target-branches: + - 7.17 - 7.16 - 7.15 - 6.8 diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index 04ba9456963dd..acc3c2059990c 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -7,3 +7,4 @@ ES_RUNTIME_JAVA: - openjdk17 + - openjdk18 diff --git a/.ci/scripts/install-gradle-profiler.sh b/.ci/scripts/install-gradle-profiler.sh new file mode 100755 index 0000000000000..84bde5e11b2b2 --- /dev/null +++ b/.ci/scripts/install-gradle-profiler.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -e +# profiler version we wanna install +PROFILER_VERSION="0.16.0" +wget https://repo.gradle.org/gradle/ext-releases-local/org/gradle/profiler/gradle-profiler/$PROFILER_VERSION/gradle-profiler-$PROFILER_VERSION.zip -O $WORKSPACE/gradle-profiler-$PROFILER_VERSION.zip +unzip $WORKSPACE/gradle-profiler-$PROFILER_VERSION.zip +mv $WORKSPACE/gradle-profiler-$PROFILER_VERSION $WORKSPACE/gradle-profiler diff --git a/.ci/scripts/packaging-test.sh b/.ci/scripts/packaging-test.sh index 1509a2091d29b..3b0e41d0c09db 100755 --- a/.ci/scripts/packaging-test.sh +++ b/.ci/scripts/packaging-test.sh @@ -37,11 +37,6 @@ if [ -f "/etc/os-release" ] ; then if [[ "$ID" == "debian" || "$ID_LIKE" == "debian" ]] ; then # FIXME: The base image should not have rpm installed sudo rm -Rf /usr/bin/rpm - # Work around incorrect lintian version - # https://github.com/elastic/elasticsearch/issues/48573 - if [ $VERSION_ID == 10 ] ; then - sudo apt-get install -y --allow-downgrades lintian=2.15.0 - fi fi else cat /etc/issue || true diff --git a/.ci/scripts/run-gradle-profiler.sh b/.ci/scripts/run-gradle-profiler.sh new file mode 100755 index 0000000000000..c649c47e079e6 --- /dev/null +++ b/.ci/scripts/run-gradle-profiler.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# drop page cache and kernel slab objects on linux +[[ -x /usr/local/sbin/drop-caches ]] && sudo /usr/local/sbin/drop-caches +rm -Rfv $WORKSPACE/gradle-user-home +mkdir -p $WORKSPACE/gradle-user-home/init.d && cp -v $WORKSPACE/.ci/init.gradle $WORKSPACE/gradle-user-home +if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then + MAX_WORKERS=16 +elif [ -f /proc/cpuinfo ]; then + MAX_WORKERS=`grep '^cpu\scores' /proc/cpuinfo | uniq | sed 's/\s\+//g' | cut -d':' -f 2` +else + if [[ "$OSTYPE" == "darwin"* ]]; then + 
MAX_WORKERS=`sysctl -n hw.physicalcpu | sed 's/\s\+//g'` + else + echo "Unsupported OS Type: $OSTYPE" + exit 1 + fi +fi +if pwd | grep -v -q ^/dev/shm ; then + echo "Not running on a ramdisk, reducing number of workers" + MAX_WORKERS=$(($MAX_WORKERS*2/3)) +fi +set -e +GRADLE_PROFILER="$WORKSPACE/gradle-profiler/bin/gradle-profiler" +$GRADLE_PROFILER "-Dorg.gradle.workers.max=$MAX_WORKERS" $@ \ No newline at end of file diff --git a/.editorconfig b/.editorconfig index 0c8a9dfd38ba2..92111b6c074f9 100644 --- a/.editorconfig +++ b/.editorconfig @@ -10,10 +10,6 @@ trim_trailing_whitespace = true insert_final_newline = true indent_style = space -ij_formatter_off_tag = @formatter:off -ij_formatter_on_tag = @formatter:on -ij_formatter_tags_enabled = false - [*.gradle] ij_continuation_indent_size = 2 indent_size = 2 diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index e9343cc9cde43..f4c973475bfae 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,6 +3,3 @@ contact_links: - name: Question url: https://discuss.elastic.co/c/elasticsearch about: Ask (and answer) questions here. - - name: Security Vulnerability - url: https://www.elastic.co/community/security - about: Send security vulnerability reports to security@elastic.co. diff --git a/.idea/checkstyle-idea.xml b/.idea/checkstyle-idea.xml index 75cca1a8171f4..362146c5a4522 100644 --- a/.idea/checkstyle-idea.xml +++ b/.idea/checkstyle-idea.xml @@ -16,4 +16,4 @@ - \ No newline at end of file + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3c89a89ea2bdd..fc9935ca69794 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -129,7 +129,7 @@ script on Windows in the root of the repository. The examples below show the usage on Unix. We support development in IntelliJ versions IntelliJ 2020.1 and -onwards and Eclipse 2020-3 and onwards. +onwards and Eclipse 2021-12 and onwards. [Docker](https://docs.docker.com/install/) is required for building some Elasticsearch artifacts and executing certain test suites. You can run Elasticsearch without building all the artifacts with: @@ -196,6 +196,13 @@ need them. 5. **IMPORTANT** - make sure "Optimize Imports" is **NOT** selected. 6. Click "OK" +Alternative manual steps for IntelliJ. + + 1. Open **File > Settings/Preferences > Code Style > Java** + 2. Gear icon > Import Scheme > Eclipse XML Profile + 3. Navigate to the file `build-conventions/formatterConfig.xml` + 4. Click "OK" + Note that only some sub-projects in the Elasticsearch project are currently fully-formatted. You can see a list of project that **are not** automatically formatted in @@ -203,15 +210,8 @@ automatically formatted in ### Importing the project into Eclipse -Elasticsearch builds using Gradle and Java 16. When importing into Eclipse you -will either need to use an appropriate JDK to run Eclipse itself (e.g. by -specifying the VM in [eclipse.ini](https://wiki.eclipse.org/Eclipse.ini) or by -defining the JDK Gradle uses by setting **Preferences** > **Gradle** > -**Advanced Options** > **Java home** to an appropriate version. - -IMPORTANT: If you have previously imported the project by running `./gradlew eclipse` - then you must build an entirely new workspace and `git clean -xdf` to - blow away *everything* that the gradle eclipse plugin made. +Elasticsearch builds using Gradle and Java 17. You'll need to point +[eclipse.ini](https://wiki.eclipse.org/Eclipse.ini)'s `-vm` to Java 17. 
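+For example, a minimal sketch of the relevant `eclipse.ini` entries (the JDK
+path is illustrative, and `-vm` must come before `-vmargs`):
+
+    -vm
+    /path/to/jdk-17/bin/java
+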
- Select **File > Import...**
- Select **Existing Gradle Project**
@@ -304,7 +304,7 @@ Please follow these formatting guidelines:
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail.
* If *absolutely* necessary, you can disable formatting for regions of code
-  with the `// @formatter:off` and `// @formatter:on` directives, but
+  with the `// tag::noformat` and `// end::noformat` directives, but
  only do this where the benefit clearly outweighs the decrease in formatting consistency.
* Note that Javadoc and block comments i.e. `/* ... */` are not formatted,
@@ -472,6 +472,241 @@ the [`Types.forciblyCast`](libs/core/src/main/java/org/elasticsearch/core/Types.
utility method. As the name suggests, you can coerce any type to any other type, so please use it as a last resort.

+### Logging
+
+The Elasticsearch server logs are vitally useful for diagnosing problems in a
+running cluster. You should make sure that your contribution uses logging
+appropriately: log enough detail to inform users about key events and help them
+understand what happened when things go wrong without logging so much detail
+that the logs fill up with noise and the useful signal is lost.
+
+Elasticsearch uses Log4J for logging. In most cases you should log via a
+`Logger` named after the class that is writing the log messages, which you can
+do by declaring a static field of the class as follows:
+
+    private static final Logger logger = LogManager.getLogger();
+
+In rare situations you may want to configure your `Logger` slightly
+differently, perhaps specifying a different class or maybe using one of the
+methods on `org.elasticsearch.common.logging.Loggers` instead.
+
+If the log message includes values from your code then you must use
+placeholders rather than constructing the string yourself using simple
+concatenation. Consider wrapping the values in `[...]` to help distinguish them
+from the static part of the message:
+
+    logger.debug("operation failed [{}] times in [{}]ms", failureCount, elapsedMillis);
+
+You can also pass in an exception to log it including its stack trace, and any
+causes and their causes, as well as any suppressed exceptions and so on:
+
+    logger.debug("operation failed", exception);
+
+If you wish to use placeholders and an exception at the same time, construct a
+`ParameterizedMessage`:
+
+    logger.debug(new ParameterizedMessage("failed at offset [{}]", offset), exception);
+
+You can also use a `Supplier<ParameterizedMessage>` to avoid constructing
+expensive messages that will usually be discarded:
+
+    logger.debug(() -> new ParameterizedMessage("rarely seen output [{}]", expensiveMethod()));
+
+Logging is an important behaviour of the system and sometimes deserves its own
+unit tests, especially if there is complex logic for computing what is logged
+and when to log it. You can use an `org.elasticsearch.test.MockLogAppender` to
+make assertions about the logs that are being emitted.
+
+Logging is a powerful diagnostic technique but it is not the only possibility.
+You should also consider exposing some information about your component via an
+API instead of in logs. For instance you can implement APIs to report its
+current status, various statistics, and maybe even details of recent failures.
+
+#### Log levels
+
+Each log message is written at a particular _level_. By default Elasticsearch
+will suppress messages at the two most verbose levels, `TRACE` and `DEBUG`, and
+will output messages at all other levels.
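+As a sketch, an administrator could raise a single logger's level at runtime
+via the cluster settings API (the logger name here is only an example):
+
+    PUT /_cluster/settings
+    {"persistent": {"logger.org.elasticsearch.indices.recovery": "DEBUG"}}
+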
Users can configure which levels of +message are written by each logger at runtime, but you should expect everyone +to run with the default configuration almost all of the time and choose your +levels accordingly. + +The guidance in this section is subjective in some areas. When in doubt, +discuss your choices with reviewers. + +##### `TRACE` + +This is the most verbose level, disabled by default, and it is acceptable if it +generates a very high volume of logs. The target audience of `TRACE` logs +comprises developers who are trying to deeply understand some unusual runtime +behaviour of a system. For instance `TRACE` logs may be useful when +understanding an unexpected interleaving of concurrent actions or some +unexpected consequences of a delayed response from a remote node. + +`TRACE` logs will normally only make sense when read alongside the code, and +typically they will be read as a whole sequence of messages rather than in +isolation. For example, the `InternalClusterInfoService` uses `TRACE` logs to +record certain key events in its periodic refresh process: + + logger.trace("starting async refresh"); + // ... + logger.trace("received node stats response"); + // ... + logger.trace("received indices stats response"); + // ... + logger.trace("stats all received, computing cluster info and notifying listeners"); + // ... + logger.trace("notifying [{}] of new cluster info", listener); + +Even though `TRACE` logs may be very verbose, you should still exercise some +judgement when deciding when to use them. In many cases it will be easier to +understand the behaviour of the system using tests or by analysing the code +itself rather than by trawling through hundreds of trivial log messages. + +It may not be easy, or even possible, to obtain `TRACE` logs from a production +system. Therefore they are not appropriate for information that you would +normally expect to be useful in diagnosing problems in production. + +##### `DEBUG` + +This is the next least verbose level and is also disabled by default. The +target audience of this level typically comprises users or developers who are +trying to diagnose an unexpected problem in a production system, perhaps to +help determine whether a fault lies within Elasticsearch or elsewhere. + +Users should expect to be able to enable `DEBUG` logging on their production +systems for a whole subsystem for an extended period of time without +overwhelming the system or filling up their disks with logs, so it is important +to limit the volume of messages logged at this level. On the other hand, these +messages must still provide enough detail to diagnose the sorts of problems +that you expect Elasticsearch to encounter. In some cases it works well to +collect information over a period of time and then log a complete summary, +rather than recording every step of a process in its own message. + +For example, the `Coordinator` uses `DEBUG` logs to record a change in mode, +including various internal details for context, because this event is fairly +rare but not important enough to notify users by default: + + logger.debug( + "{}: coordinator becoming CANDIDATE in term {} (was {}, lastKnownLeader was [{}])", + method, + getCurrentTerm(), + mode, + lastKnownLeader + ); + +It's possible that the reader of `DEBUG` logs is also reading the code, but +that is less likely than for `TRACE` logs. 
Strive to avoid terminology that +only makes sense when reading the code, and also aim for messages at this level +to be self-contained rather than intending them to be read as a sequence. + +It's often useful to log exceptions and other deviations from the "happy path" +at `DEBUG` level. Exceptions logged at `DEBUG` should generally include the +complete stack trace. + +##### `INFO` + +This is the next least verbose level, and the first level that is enabled by +default. It is appropriate for recording important events in the life of the +cluster, such as an index being created or deleted or a snapshot starting or +completing. Users will mostly ignore log messages at `INFO` level, but may use +these messages to construct a high-level timeline of events leading up to an +incident. + +For example, the `MetadataIndexTemplateService` uses `INFO` logs to record when +an index template is created or updated: + + logger.info( + "{} index template [{}] for index patterns {}", + existing == null ? "adding" : "updating", + name, + template.indexPatterns() + ); + +`INFO`-level logging is enabled by default so its target audience is the +general population of users and administrators. You should use user-facing +terminology and ensure that messages at this level are self-contained. In +general you shouldn't log unusual events, particularly exceptions with stack +traces, at `INFO` level. If the event is relatively benign then use `DEBUG`, +whereas if the user should be notified then use `WARN`. + +Bear in mind that users will be reading the logs when they're trying to +determine why their node is not behaving the way they expect. If a log message +sounds like an error then some users will interpret it as one, even if it is +logged at `INFO` level. Where possible, `INFO` messages should prefer factual +over judgemental language, for instance saying `Did not find ...` rather than +`Failed to find ...`. + +##### `WARN` + +This is the next least verbose level, and is also enabled by default. Ideally a +healthy cluster will emit no `WARN`-level logs, but this is the appropriate +level for recording events that the cluster administrator should investigate, +or which indicate a bug. Some production environments require the cluster to +emit no `WARN`-level logs during acceptance testing, so you must ensure that +any logs at this level really do indicate a problem that needs addressing. + +As with the `INFO` level, you should use user-facing terminology at the `WARN` +level, and also ensure that messages are self-contained. Strive to make them +actionable too since you should be logging at this level when the user should +take some investigative action. + +For example, the `DiskThresholdMonitor` uses `WARN` logs to record that a disk +threshold has been breached: + + logger.warn( + "flood stage disk watermark [{}] exceeded on {}, all indices on this node will be marked read-only", + diskThresholdSettings.describeFloodStageThreshold(), + usage + ); + +Unlike at the `INFO` level, it is often appropriate to log an exception, +complete with stack trace, at `WARN` level. Although the stack trace may not be +useful to the user, it may contain information that is vital for a developer to +fully understand the problem and its wider context. + +In a situation where occasional transient failures are expected and handled, +but a persistent failure requires the user's attention, consider implementing a +mechanism to detect that a failure is unacceptably persistent and emit a +corresponding `WARN` log. 
For example, it may be helpful to log every tenth +consecutive failure at `WARN` level, or log at `WARN` if an operation has not +completed within a certain time limit. This is much more user-friendly than +failing persistently and silently by default and requiring the user to enable +`DEBUG` logging to investigate the problem. + +If an exception occurs as a direct result of a request received from a client +then it should only be logged as a `WARN` if the server administrator is the +right person to address it. In most cases the server administrator cannot do +anything about faulty client requests, and the person running the client is +often unable to see the server logs, so you should include the exception in the +response back to the client and not log a warning. Bear in mind that clients +may submit requests at a high rate, so any per-request logging can easily flood +the logs. + +##### `ERROR` + +This is the next least verbose level after `WARN`. In theory it is possible for +users to suppress messages at `WARN` and below, believing this to help them +focus on the most important `ERROR` messages, but in practice in Elasticsearch +this will hide so much useful information that the resulting logs will be +useless, so we do not expect users to do this kind of filtering. + +On the other hand, users may be familiar with the `ERROR` level from elsewhere. +Log4J for instance documents this level as meaning "an error in the +application, possibly recoverable". The implication here is that the error is +possibly _not_ recoverable too, and we do encounter users that get very worried +by logs at `ERROR` level for this reason. + +Therefore you should try and avoid logging at `ERROR` level unless the error +really does indicate that Elasticsearch is now running in a degraded state from +which it will not recover. For instance, the `FsHealthService` uses `ERROR` +logs to record that the data path failed some basic health checks and hence the +node cannot continue to operate as a member of the cluster: + + logger.error(new ParameterizedMessage("health check of [{}] failed", path), ex); + +Errors like this should be very rare. When in doubt, prefer `WARN` to `ERROR`. + ### Creating A Distribution Run all build commands from within the root directory: diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 6b83a2635a278..8aab31a0e599e 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -64,6 +64,15 @@ NOTE: If you have imported the project into IntelliJ according to the instructio link:/CONTRIBUTING.md#importing-the-project-into-intellij-idea[CONTRIBUTING.md] then a debug run configuration named "Debug Elasticsearch" will be created for you and configured appropriately. +==== Disabling assertions + +When running Elasticsearch with `./gradlew run`, assertions are enabled by +default. To disable them, add the following command line option: + +------------------------- +-Dtests.jvm.argline="-da" +------------------------- + ==== Distribution By default a node is started with the zip distribution. @@ -92,12 +101,12 @@ password: `elastic-password`. 
==== Other useful arguments
- In order to start a node with a different max heap space add: `-Dtests.heap.size=4G`
-- In order to disable assertions add: `-Dtests.asserts=false`
- In order to use a custom data directory: `--data-dir=/tmp/foo`
- In order to preserve data in between executions: `--preserve-data`
- In order to remotely attach a debugger to the process: `--debug-jvm`
- In order to set a different keystore password: `--keystore-password`
- In order to set an Elasticsearch setting, provide a setting with the following prefix: `-Dtests.es.`
+- In order to pass a JVM setting, e.g. to disable assertions: `-Dtests.jvm.argline="-da"`

=== Test case filtering.

diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/indices/common/RoundingBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/indices/common/RoundingBenchmark.java
index 9efcfef8088ee..7ee8c33b2fe8e 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/indices/common/RoundingBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/indices/common/RoundingBenchmark.java
@@ -89,16 +89,11 @@ public void buildDates() {
             roundingBuilder = Rounding.builder(TimeValue.parseTimeValue(interval, "interval"));
         }
         Rounding rounding = roundingBuilder.timeZone(ZoneId.of(zone)).build();
-        switch (rounder) {
-            case "java time":
-                rounderBuilder = rounding::prepareJavaTime;
-                break;
-            case "es":
-                rounderBuilder = () -> rounding.prepare(min, max);
-                break;
-            default:
-                throw new IllegalArgumentException("Expectd rounder to be [java time] or [es]");
-        }
+        rounderBuilder = switch (rounder) {
+            case "java time" -> rounding::prepareJavaTime;
+            case "es" -> () -> rounding.prepare(min, max);
+            default -> throw new IllegalArgumentException("Expected rounder to be [java time] or [es]");
+        };
     }

     @Benchmark
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
index 05ac262c533e5..2a356b7883a02 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
@@ -101,28 +101,19 @@ public class ScriptScoreBenchmark {

     @Setup
     public void setupScript() {
-        switch (script) {
-            case "expression":
-                factory = scriptModule.engines.get("expression").compile("test", "doc['n'].value", ScoreScript.CONTEXT, Map.of());
-                break;
-            case "metal":
-                factory = bareMetalScript();
-                break;
-            case "painless_cast":
-                factory = scriptModule.engines.get("painless")
-                    .compile(
-                        "test",
-                        "((org.elasticsearch.index.fielddata.ScriptDocValues.Longs)doc['n']).value",
-                        ScoreScript.CONTEXT,
-                        Map.of()
-                    );
-                break;
-            case "painless_def":
-                factory = scriptModule.engines.get("painless").compile("test", "doc['n'].value", ScoreScript.CONTEXT, Map.of());
-                break;
-            default:
-                throw new IllegalArgumentException("Don't know how to implement script [" + script + "]");
-        }
+        factory = switch (script) {
+            case "expression" -> scriptModule.engines.get("expression").compile("test", "doc['n'].value", ScoreScript.CONTEXT, Map.of());
+            case "metal" -> bareMetalScript();
+            case "painless_cast" -> scriptModule.engines.get("painless")
+                .compile(
+                    "test",
+                    "((org.elasticsearch.index.fielddata.ScriptDocValues.Longs)doc['n']).value",
+                    ScoreScript.CONTEXT,
+                    Map.of()
+                );
+            case "painless_def" -> scriptModule.engines.get("painless").compile("test", "doc['n'].value", ScoreScript.CONTEXT,
Map.of()); + default -> throw new IllegalArgumentException("Don't know how to implement script [" + script + "]"); + }; } @Setup diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java new file mode 100644 index 0000000000000..8d3bb4940faf2 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java @@ -0,0 +1,204 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.benchmark.search; + +import org.apache.logging.log4j.util.Strings; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperRegistry; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.search.QueryParserHelper; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptCompiler; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import 
org.openjdk.jmh.annotations.Warmup; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +@Fork(1) +@Warmup(iterations = 5) +@Measurement(iterations = 5) +@State(Scope.Benchmark) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@BenchmarkMode(Mode.AverageTime) +public class QueryParserHelperBenchmark { + + private static final int NUMBER_OF_MAPPING_FIELDS = 1000; + + private Directory directory; + private IndexReader indexReader; + private MapperService mapperService; + + @Setup + public void setup() throws IOException { + // pre: set up MapperService and SearchExecutionContext + List fields = new ArrayList<>(); + for (int i = 0; i < NUMBER_OF_MAPPING_FIELDS; i++) { + fields.add(String.format(""" + "field%d":{"type":"long"}""", i)); + } + String mappings = """ + {"_doc":{"properties":{""" + Strings.join(fields, ',') + "}}}"; + + mapperService = createMapperService(mappings); + IndexWriterConfig iwc = new IndexWriterConfig(IndexShard.buildIndexAnalyzer(mapperService)); + directory = new ByteBuffersDirectory(); + IndexWriter iw = new IndexWriter(directory, iwc); + + for (int i = 0; i < 2000; i++) { + ParsedDocument doc = mapperService.documentMapper().parse(buildDoc(i)); + iw.addDocument(doc.rootDoc()); + if (i % 100 == 0) { + iw.commit(); + } + } + iw.close(); + + indexReader = DirectoryReader.open(directory); + } + + private SourceToParse buildDoc(int docId) { + List fields = new ArrayList<>(); + for (int i = 0; i < NUMBER_OF_MAPPING_FIELDS; i++) { + if (i % 2 == 0) continue; + if (i % 3 == 0 && (docId < (NUMBER_OF_MAPPING_FIELDS / 2))) continue; + fields.add(String.format(""" + "field%d":1""", i)); + } + String source = "{" + String.join(",", fields) + "}"; + return new SourceToParse("" + docId, new BytesArray(source), XContentType.JSON); + } + + @TearDown + public void tearDown() { + IOUtils.closeWhileHandlingException(indexReader, directory); + } + + @Benchmark + public void expand() { + Map fields = QueryParserHelper.resolveMappingFields(buildSearchExecutionContext(), Map.of("*", 1f)); + assert fields.size() > 0 && fields.size() < NUMBER_OF_MAPPING_FIELDS; + } + + protected SearchExecutionContext buildSearchExecutionContext() { + final SimilarityService similarityService = new SimilarityService(mapperService.getIndexSettings(), null, Map.of()); + final long nowInMillis = 1; + return new SearchExecutionContext( + 0, + 0, + mapperService.getIndexSettings(), + null, + (ft, idxName, lookup) -> ft.fielddataBuilder(idxName, lookup) + .build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()), + mapperService, + mapperService.mappingLookup(), + similarityService, + null, + XContentParserConfiguration.EMPTY.withRegistry(new NamedXContentRegistry(ClusterModule.getNamedXWriteables())) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), + null, + new IndexSearcher(indexReader), + () -> nowInMillis, + null, + null, + () -> true, + null, + Collections.emptyMap() + ); + } + + protected final MapperService createMapperService(String mappings) { + Settings settings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 1) + .put("index.version.created", Version.CURRENT) + .build(); + IndexMetadata meta = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(meta, 
settings); + MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); + + SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); + MapperService mapperService = new MapperService( + indexSettings, + new IndexAnalyzers( + Map.of("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())), + Map.of(), + Map.of() + ), + XContentParserConfiguration.EMPTY.withRegistry(new NamedXContentRegistry(ClusterModule.getNamedXWriteables())) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + similarityService, + mapperRegistry, + () -> { throw new UnsupportedOperationException(); }, + new IdFieldMapper(() -> true), + new ScriptCompiler() { + @Override + public T compile(Script script, ScriptContext scriptContext) { + throw new UnsupportedOperationException(); + } + } + ); + + try { + mapperService.merge("_doc", new CompressedXContent(mappings), MapperService.MergeReason.MAPPING_UPDATE); + return mapperService; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java index 43a2d1930d0e8..f8d2e148b16ce 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java @@ -107,20 +107,12 @@ public class AggConstructionContentionBenchmark { @Setup public void setup() { - switch (breaker) { - case "real": - breakerService = new HierarchyCircuitBreakerService(Settings.EMPTY, List.of(), clusterSettings); - break; - case "preallocate": - preallocateBreaker = true; - breakerService = new HierarchyCircuitBreakerService(Settings.EMPTY, List.of(), clusterSettings); - break; - case "noop": - breakerService = new NoneCircuitBreakerService(); - break; - default: - throw new UnsupportedOperationException(); - } + breakerService = switch (breaker) { + case "real", "preallocate" -> new HierarchyCircuitBreakerService(Settings.EMPTY, List.of(), clusterSettings); + case "noop" -> new NoneCircuitBreakerService(); + default -> throw new UnsupportedOperationException(); + }; + preallocateBreaker = breaker.equals("preallocate"); bigArrays = new BigArrays(recycler, breakerService, "request"); } @@ -354,6 +346,11 @@ public boolean enableRewriteToFilterByFilter() { return true; } + @Override + public boolean isInSortOrderExecutionRequired() { + return false; + } + @Override public void close() { List releaseMe = new ArrayList<>(this.releaseMe); diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java index 7b53bb55f51bd..3c34166dbd4ea 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java @@ -24,8 +24,8 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregationReduceContext; import 
org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; @@ -65,29 +65,21 @@ @Fork(value = 1) public class TermsReduceBenchmark { - private final SearchPhaseController controller = new SearchPhaseController( - (task, req) -> new InternalAggregation.ReduceContextBuilder() { - @Override - public InternalAggregation.ReduceContext forPartialReduction() { - return InternalAggregation.ReduceContext.forPartialReduction(null, null, () -> PipelineAggregator.PipelineTree.EMPTY, task); - } + private final SearchPhaseController controller = new SearchPhaseController((task, req) -> new AggregationReduceContext.Builder() { + @Override + public AggregationReduceContext forPartialReduction() { + return new AggregationReduceContext.ForPartial(null, null, task); + } - @Override - public InternalAggregation.ReduceContext forFinalReduction() { - final MultiBucketConsumerService.MultiBucketConsumer bucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( - Integer.MAX_VALUE, - new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) - ); - return InternalAggregation.ReduceContext.forFinalReduction( - null, - null, - bucketConsumer, - PipelineAggregator.PipelineTree.EMPTY, - task - ); - } + @Override + public AggregationReduceContext forFinalReduction() { + final MultiBucketConsumerService.MultiBucketConsumer bucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + Integer.MAX_VALUE, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + return new AggregationReduceContext.ForFinal(null, null, bucketConsumer, PipelineAggregator.PipelineTree.EMPTY, task); } - ); + }); @State(Scope.Benchmark) public static class TermsList extends AbstractList { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java index 4d8d73ca0a026..5c768a7dcb1ef 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java @@ -52,22 +52,13 @@ public class FetchSourcePhaseBenchmark { @Setup public void setup() throws IOException { - switch (source) { - case "tiny": - sourceBytes = new BytesArray("{\"message\": \"short\"}"); - break; - case "short": - sourceBytes = read300BytesExample(); - break; - case "one_4k_field": - sourceBytes = buildBigExample("huge".repeat(1024)); - break; - case "one_4m_field": - sourceBytes = buildBigExample("huge".repeat(1024 * 1024)); - break; - default: - throw new IllegalArgumentException("Unknown source [" + source + "]"); - } + sourceBytes = switch (source) { + case "tiny" -> new BytesArray("{\"message\": \"short\"}"); + case "short" -> read300BytesExample(); + case "one_4k_field" -> buildBigExample("huge".repeat(1024)); + case "one_4m_field" -> buildBigExample("huge".repeat(1024 * 1024)); + default -> throw new IllegalArgumentException("Unknown source [" + source + "]"); + }; fetchContext = new FetchSourceContext( true, Strings.splitStringByCommaToArray(includes), diff --git 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/xcontent/FilterContentBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/xcontent/FilterContentBenchmark.java index c6ff83762b014..6f4e926dbe969 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/xcontent/FilterContentBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/xcontent/FilterContentBenchmark.java @@ -8,11 +8,14 @@ package org.elasticsearch.benchmark.xcontent; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.search.fetch.subphase.FetchSourcePhase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -39,8 +42,8 @@ import java.util.stream.Collectors; @Fork(1) -@Warmup(iterations = 2) -@Measurement(iterations = 3) +@Warmup(iterations = 1) +@Measurement(iterations = 2) @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.NANOSECONDS) @State(Scope.Benchmark) @@ -61,20 +64,12 @@ public class FilterContentBenchmark { @Setup public void setup() throws IOException { - String sourceFile; - switch (type) { - case "cluster_stats": - sourceFile = "monitor_cluster_stats.json"; - break; - case "index_stats": - sourceFile = "monitor_index_stats.json"; - break; - case "node_stats": - sourceFile = "monitor_node_stats.json"; - break; - default: - throw new IllegalArgumentException("Unknown type [" + type + "]"); - } + String sourceFile = switch (type) { + case "cluster_stats" -> "monitor_cluster_stats.json"; + case "index_stats" -> "monitor_index_stats.json"; + case "node_stats" -> "monitor_node_stats.json"; + default -> throw new IllegalArgumentException("Unknown type [" + type + "]"); + }; source = readSource(sourceFile); filters = buildFilters(); parserConfig = buildParseConfig(); @@ -84,31 +79,25 @@ private Set buildFilters() { Map flattenMap = Maps.flatten(XContentHelper.convertToMap(source, true, XContentType.JSON).v2(), false, true); Set keys = flattenMap.keySet(); AtomicInteger count = new AtomicInteger(); - switch (fieldCount) { - case "10_field": - return keys.stream().filter(key -> count.getAndIncrement() % 5 == 0).limit(10).collect(Collectors.toSet()); - case "half_field": - return keys.stream().filter(key -> count.getAndIncrement() % 2 == 0).collect(Collectors.toSet()); - case "all_field": - return new HashSet<>(keys); - case "wildcard_field": - return new HashSet<>(Arrays.asList("*stats")); - case "10_wildcard_field": - return Set.of( - "*stats.nodes*", - "*stats.ind*", - "*sta*.shards", - "*stats*.xpack", - "*stats.*.segments", - "*stat*.*.data*", - inclusive ? "*stats.**.request_cache" : "*stats.*.request_cache", - inclusive ? "*stats.**.stat" : "*stats.*.stat", - inclusive ? 
"*stats.**.threads" : "*stats.*.threads", - "*source_node.t*" - ); - default: - throw new IllegalArgumentException("Unknown type [" + type + "]"); - } + return switch (fieldCount) { + case "10_field" -> keys.stream().filter(key -> count.getAndIncrement() % 5 == 0).limit(10).collect(Collectors.toSet()); + case "half_field" -> keys.stream().filter(key -> count.getAndIncrement() % 2 == 0).collect(Collectors.toSet()); + case "all_field" -> new HashSet<>(keys); + case "wildcard_field" -> new HashSet<>(Arrays.asList("*stats")); + case "10_wildcard_field" -> Set.of( + "*stats.nodes*", + "*stats.ind*", + "*sta*.shards", + "*stats*.xpack", + "*stats.*.segments", + "*stat*.*.data*", + inclusive ? "*stats.**.request_cache" : "*stats.*.request_cache", + inclusive ? "*stats.**.stat" : "*stats.*.stat", + inclusive ? "*stats.**.threads" : "*stats.*.threads", + "*source_node.t*" + ); + default -> throw new IllegalArgumentException("Unknown type [" + type + "]"); + }; } @Benchmark @@ -122,6 +111,47 @@ public BytesReference filterWithNewParserConfig() throws IOException { return filter(contentParserConfiguration); } + @Benchmark + public BytesReference filterWithMap() throws IOException { + Map sourceMap = XContentHelper.convertToMap(source, false).v2(); + String[] includes; + String[] excludes; + if (inclusive) { + includes = filters.toArray(Strings.EMPTY_ARRAY); + excludes = null; + } else { + includes = null; + excludes = filters.toArray(Strings.EMPTY_ARRAY); + } + Map filterMap = XContentMapValues.filter(sourceMap, includes, excludes); + return FetchSourcePhase.objectToBytes(filterMap, XContentType.JSON, Math.min(1024, source.length())); + } + + @Benchmark + public BytesReference filterWithBuilder() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(Math.min(1024, source.length())); + Set includes; + Set excludes; + if (inclusive) { + includes = filters; + excludes = Set.of(); + } else { + includes = Set.of(); + excludes = filters; + } + XContentBuilder builder = new XContentBuilder( + XContentType.JSON.xContent(), + streamOutput, + includes, + excludes, + XContentType.JSON.toParsedMediaType() + ); + try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, source.streamInput())) { + builder.copyCurrentStructure(parser); + return BytesReference.bytes(builder); + } + } + private XContentParserConfiguration buildParseConfig() { Set includes; Set excludes; diff --git a/build-conventions/build.gradle b/build-conventions/build.gradle index f540e7680fbfa..0e1f295b7e8d9 100644 --- a/build-conventions/build.gradle +++ b/build-conventions/build.gradle @@ -16,9 +16,9 @@ plugins { group = "org.elasticsearch" -def minRuntimeJava = JavaVersion.toVersion(file('../build-tools-internal/src/main/resources/minimumRuntimeVersion').text) -targetCompatibility = minRuntimeJava -sourceCompatibility = minRuntimeJava +// This project contains Checkstyle rule implementations used by IDEs which use a Java 11 runtime +targetCompatibility = 11 +sourceCompatibility = 11 gradlePlugin { // We already configure publication and we don't need or want the one that comes @@ -65,7 +65,7 @@ dependencies { api 'org.apache.maven:maven-model:3.6.2' api 'gradle.plugin.com.github.jengelman.gradle.plugins:shadow:7.0.0' api 'org.apache.rat:apache-rat:0.11' - compileOnly "com.puppycrawl.tools:checkstyle:8.42" + compileOnly "com.puppycrawl.tools:checkstyle:8.45.1" api('com.diffplug.spotless:spotless-plugin-gradle:6.0.0') { exclude module: "groovy-xml" } diff --git 
a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java index 23155cc2971e7..52e19b8023238 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java @@ -30,6 +30,7 @@ import com.puppycrawl.tools.checkstyle.utils.TokenUtil; import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.Objects; import java.util.Set; @@ -71,9 +72,21 @@ public class HiddenFieldCheck extends AbstractCheck { /** Control whether to ignore constructor parameters. */ private boolean ignoreConstructorParameter; + /** Control whether to ignore variables in constructor bodies. */ + private boolean ignoreConstructorBody; + /** Control whether to ignore parameters of abstract methods. */ private boolean ignoreAbstractMethods; + /** If set, specifies a regex of method names that should be ignored */ + private String ignoreMethodNames; + + /** If set, specifies a regex of constructor names that should be ignored */ + private String ignoreConstructorMethods; + + /** Control the minimal amount of lines in method to allow shadowed variables .*/ + private int minLineCount = -1; + @Override public int[] getDefaultTokens() { return getAcceptableTokens(); @@ -224,7 +237,8 @@ private void processVariable(DetailAST ast) { if ((frame.containsStaticField(name) || isInstanceField(ast, name)) && isMatchingRegexp(name) == false - && isIgnoredParam(ast, name) == false) { + && isIgnoredParam(ast, name) == false + && isIgnoredVariable(ast, name) == false) { log(nameAST, MSG_KEY, name); } } @@ -238,7 +252,15 @@ && isIgnoredParam(ast, name) == false) { * @return true if parameter is ignored. */ private boolean isIgnoredParam(DetailAST ast, String name) { - return isIgnoredSetterParam(ast, name) || isIgnoredConstructorParam(ast) || isIgnoredParamOfAbstractMethod(ast); + return isVariableInIgnoredMethod(ast, name) + || isIgnoredSetterParam(ast, name) + || isIgnoredConstructorParam(ast) + || isIgnoredParamOfAbstractMethod(ast); + } + + private boolean isIgnoredVariable(DetailAST ast, String name) { + return isVariableInConstructorBody(ast, name) || + isVariableInIgnoredConstructor(ast, name); } /** @@ -328,24 +350,33 @@ private boolean isSetterMethod(DetailAST aMethodAST, String aName) { boolean isSetterMethod = false; // ES also allows setters with the same name as a property, and builder-style settings that start with "with". 
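+        // (illustrative note) the rewrite below precomputes both capitalisation variants of the
+        // set/with names, so e.g. "setXContent", "setxContent", "withXContent" and plain "xContent" all match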
-        if (("set" + capitalize(aName)).equals(methodName) || ("with" + capitalize(aName)).equals(methodName) || aName.equals(methodName)) {
+        final List<String> possibleSetterNames = List.of(
+            "set" + capitalize(aName, true),
+            "set" + capitalize(aName, false),
+            "with" + capitalize(aName, true),
+            "with" + capitalize(aName, false),
+            aName
+        );
+
+        if (possibleSetterNames.contains(methodName)) {
             // method name did match set${Name}(${anyType} ${aName})
             // where ${Name} is capitalized version of ${aName}
             // therefore this method is potentially a setter
             final DetailAST typeAST = aMethodAST.findFirstToken(TokenTypes.TYPE);
             final String returnType = typeAST.getFirstChild().getText();
-            if (typeAST.findFirstToken(TokenTypes.LITERAL_VOID) != null || setterCanReturnItsClass && frame.isEmbeddedIn(returnType)) {
-                // this method has signature
-                //
-                // void set${Name}(${anyType} ${name})
-                //
-                // and therefore considered to be a setter
-                //
-                // or
-                //
-                // return type is not void, but it is the same as the class
-                // where method is declared and and mSetterCanReturnItsClass
-                // is set to true
+
+            // The method is named `setFoo`, `withFoo`, or just `foo` and returns void
+            final boolean returnsVoid = typeAST.findFirstToken(TokenTypes.LITERAL_VOID) != null;
+
+            // Or the method is named as above, and returns the class type or a builder type.
+            // It ought to be possible to see if we're in a `${returnType}.Builder`, but for some reason the parse
+            // tree has `returnType` as `.` when the current class is `Builder` so instead assume that a class called `Builder` is OK.
+            final boolean returnsSelf = setterCanReturnItsClass && frame.isEmbeddedIn(returnType);
+
+            final boolean returnsBuilder = setterCanReturnItsClass
+                && (frame.isEmbeddedIn(returnType + "Builder") || (frame.isEmbeddedIn("Builder")));
+
+            if (returnsVoid || returnsSelf || returnsBuilder) {
                 isSetterMethod = true;
             }
         }
@@ -360,13 +391,13 @@ private boolean isSetterMethod(DetailAST aMethodAST, String aName) {
      * @param name a property name
      * @return capitalized property name
      */
-    private static String capitalize(final String name) {
+    private static String capitalize(final String name, boolean javaBeanCompliant) {
         String setterName = name;
         // we should not capitalize the first character if the second
         // one is a capital one, since according to JavaBeans spec
         // setXYzz() is a setter for XYzz property, not for xYzz one.
-        // @pugnascotia: unless the first char is 'x'.
-        if (name.length() == 1 || (Character.isUpperCase(name.charAt(1)) == false || name.charAt(0) == 'x')) {
+        // @pugnascotia: this is unhelpful in the Elasticsearch codebase. We have e.g. xContent -> setXContent, or nNeighbors -> nNeighbors.
+        if (name.length() == 1 || (javaBeanCompliant == false || Character.isUpperCase(name.charAt(1)) == false)) {
             setterName = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1);
         }
         return setterName;
@@ -410,6 +441,64 @@ private boolean isIgnoredParamOfAbstractMethod(DetailAST ast) {
         return result;
     }

+    /**
+     * Decides whether to ignore an AST node that is within a method that ought to be ignored.
+     * This is either because:
+     *
+     * <ul>
+     * <li>The method name matches the {@link #ignoreMethodNames} regex, if set.</li>
+     * <li>The method's line length is less than or equal to the {@link #minLineCount}.</li>
+     * </ul>
+     *
+     * @param ast the AST to check
+     * @return true if the AST node should be ignored
+     */
+    private boolean isVariableInIgnoredMethod(DetailAST ast, String name) {
+        boolean result = false;
+        if (ignoreMethodNames != null && (ast.getType() == TokenTypes.PARAMETER_DEF || ast.getType() == TokenTypes.VARIABLE_DEF)) {
+            DetailAST method = ast.getParent();
+            while (method != null && method.getType() != TokenTypes.METHOD_DEF) {
+                method = method.getParent();
+            }
+            if (method != null && method.getType() == TokenTypes.METHOD_DEF) {
+                final String methodName = method.findFirstToken(TokenTypes.IDENT).getText();
+                result = methodName.matches(ignoreMethodNames) || getMethodsNumberOfLine(method) <= this.minLineCount;
+            }
+        }
+        return result;
+    }
+
+    private boolean isVariableInConstructorBody(DetailAST ast, String name) {
+        boolean result = false;
+
+        if (ignoreConstructorBody && ast.getType() == TokenTypes.VARIABLE_DEF) {
+            DetailAST method = ast.getParent();
+            while (method != null && method.getType() != TokenTypes.CTOR_DEF) {
+                method = method.getParent();
+            }
+            result = method != null && method.getType() == TokenTypes.CTOR_DEF;
+        }
+
+        return result;
+    }
+
+    private boolean isVariableInIgnoredConstructor(DetailAST ast, String name) {
+        boolean result = false;
+
+        if (ignoreConstructorBody && ast.getType() == TokenTypes.VARIABLE_DEF) {
+            DetailAST method = ast.getParent();
+            while (method != null && method.getType() != TokenTypes.LITERAL_NEW) {
+                method = method.getParent();
+            }
+            if (method != null) {
+                final String ctorName = method.findFirstToken(TokenTypes.IDENT).getText();
+                result = ctorName.matches(this.ignoreConstructorMethods);
+            }
+        }
+
+        return result;
+    }
+
     /**
      * Setter to define the RegExp for names of variables and parameters to ignore.
      *
@@ -463,6 +552,22 @@ public void setIgnoreAbstractMethods(boolean ignoreAbstractMethods) {
         this.ignoreAbstractMethods = ignoreAbstractMethods;
     }

+    public void setIgnoreMethodNames(String ignoreMethodNames) {
+        this.ignoreMethodNames = ignoreMethodNames;
+    }
+
+    public void setIgnoreConstructorBody(boolean ignoreConstructorBody) {
+        this.ignoreConstructorBody = ignoreConstructorBody;
+    }
+
+    public void setIgnoreConstructorMethods(String ignoreConstructorMethods) {
+        this.ignoreConstructorMethods = ignoreConstructorMethods;
+    }
+
+    public void setMinLineCount(int minLineCount) {
+        this.minLineCount = minLineCount;
+    }
+
     /**
      * Holds the names of static and instance fields of a type.
      */
@@ -568,4 +673,18 @@ private boolean isEmbeddedIn(String classOrEnumName) {
     }

+    private static int getMethodsNumberOfLine(DetailAST methodDef) {
+        final int numberOfLines;
+        final DetailAST lcurly = methodDef.getLastChild();
+        final DetailAST rcurly = lcurly.getLastChild();
+
+        if (lcurly.getFirstChild() == rcurly) {
+            numberOfLines = 1;
+        }
+        else {
+            numberOfLines = rcurly.getLineNo() - lcurly.getLineNo() - 1;
+        }
+        return numberOfLines;
+    }
+
 }
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/SnippetLengthCheck.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/SnippetLengthCheck.java
index 14c2b74e6e6bc..7053bd1626153 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/SnippetLengthCheck.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/SnippetLengthCheck.java
@@ -22,9 +22,12 @@
 /**
  * Checks the snippets included in the docs aren't too wide to fit on
  * the page.
+ * <p>

+ * Regions contained in the special noformat tag are exempt from the length + * check. This region is also exempt from automatic formatting. */ public class SnippetLengthCheck extends AbstractFileSetCheck { - private static final Pattern START = Pattern.compile("^( *)//\\s*tag::(.+?)\\s*$", Pattern.MULTILINE); + private static final Pattern START = Pattern.compile("^( *)//\\s*tag::(?!noformat)(.+?)\\s*$", Pattern.MULTILINE); private int max; /** diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/StringFormattingCheck.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/StringFormattingCheck.java new file mode 100644 index 0000000000000..48fa3ad6ee485 --- /dev/null +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/StringFormattingCheck.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.checkstyle; + +import com.puppycrawl.tools.checkstyle.StatelessCheck; +import com.puppycrawl.tools.checkstyle.api.AbstractCheck; +import com.puppycrawl.tools.checkstyle.api.DetailAST; +import com.puppycrawl.tools.checkstyle.api.TokenTypes; + +import java.util.Locale; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Checks for calls to {@link String#formatted(Object...)} that include format specifiers that + * are not locale-safe. This method always uses the default {@link Locale}, and so for our + * purposes it is safer to use {@link String#format(Locale, String, Object...)}. + *

+ * Note that this rule can currently only detect violations when calling formatted() + * on a string literal or text block. In theory, it could be extended to detect violations in + * local variables or statics. + */ +@StatelessCheck +public class StringFormattingCheck extends AbstractCheck { + + public static final String FORMATTED_MSG_KEY = "forbidden.formatted"; + + @Override + public int[] getDefaultTokens() { + return getRequiredTokens(); + } + + @Override + public int[] getAcceptableTokens() { + return getRequiredTokens(); + } + + @Override + public int[] getRequiredTokens() { + return new int[] { TokenTypes.METHOD_CALL }; + } + + @Override + public void visitToken(DetailAST ast) { + checkFormattedMethod(ast); + } + + // Originally pinched from java/util/Formatter.java but then modified. + // %[argument_index$][flags][width][.precision][t]conversion + private static final Pattern formatSpecifier = Pattern.compile("%(?:\\d+\\$)?(?:[-#+ 0,\\(<]*)?(?:\\d+)?(?:\\.\\d+)?([tT]?[a-zA-Z%])"); + + private void checkFormattedMethod(DetailAST ast) { + final DetailAST dotAst = ast.findFirstToken(TokenTypes.DOT); + if (dotAst == null) { + return; + } + + final String methodName = dotAst.findFirstToken(TokenTypes.IDENT).getText(); + if (methodName.equals("formatted") == false) { + return; + } + + final DetailAST subjectAst = dotAst.getFirstChild(); + + String stringContent = null; + if (subjectAst.getType() == TokenTypes.TEXT_BLOCK_LITERAL_BEGIN) { + stringContent = subjectAst.findFirstToken(TokenTypes.TEXT_BLOCK_CONTENT).getText(); + } else if (subjectAst.getType() == TokenTypes.STRING_LITERAL) { + stringContent = subjectAst.getText(); + } + + if (stringContent != null) { + final Matcher m = formatSpecifier.matcher(stringContent); + while (m.find()) { + char specifier = m.group(1).toLowerCase(Locale.ROOT).charAt(0); + + if (specifier == 'd' || specifier == 'e' || specifier == 'f' || specifier == 'g' || specifier == 't') { + log(ast, FORMATTED_MSG_KEY, m.group()); + } + } + } + } +} diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java index 17a845f166b10..453e5cb5d7e13 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java @@ -60,9 +60,6 @@ public void apply(Project project) { java.target("src/**/*.java"); - // Use `@formatter:off` and `@formatter:on` to toggle formatting - ONLY IF STRICTLY NECESSARY - java.toggleOffOn("@formatter:off", "@formatter:on"); - java.removeUnusedImports(); // We enforce a standard order for imports diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 4aa5eccf3f2e7..9b647270e54ea 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -6,6 +6,9 @@ * Side Public License, v 1. 
*/ +import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.internal.conventions.info.GitInfo + plugins { id 'java-gradle-plugin' id 'groovy-gradle-plugin' @@ -199,36 +202,43 @@ configurations { integTestRuntimeOnly.extendsFrom(testRuntimeOnly) } dependencies { + constraints { + api("com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.getProperty('jackson')}") { + version { + strictly "${versions.getProperty('jackson')}" + } + because 'We want to use the exact same jackson version we use in production' + } + } api localGroovy() api gradleApi() api "org.elasticsearch:build-conventions:$version" api "org.elasticsearch.gradle:build-tools:$version" - api 'commons-codec:commons-codec:1.12' + // same version as http client transitive dep + api 'commons-codec:commons-codec:1.11' api 'org.apache.commons:commons-compress:1.19' api 'org.apache.ant:ant:1.10.8' - api 'com.netflix.nebula:gradle-extra-configurations-plugin:5.0.1' - api 'com.netflix.nebula:gradle-info-plugin:9.2.0' + api 'com.netflix.nebula:gradle-info-plugin:11.1.0' api 'org.apache.rat:apache-rat:0.11' api "net.java.dev.jna:jna:${versions.getProperty('jna')}" - api 'gradle.plugin.com.github.jengelman.gradle.plugins:shadow:7.0.0' + api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' // for our ide tweaking - api 'gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:0.7' + api 'gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.1' // When upgrading forbidden apis, ensure dependency version is bumped in ThirdPartyPrecommitPlugin as well - api 'de.thetaphi:forbiddenapis:3.1' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.0' + api 'de.thetaphi:forbiddenapis:3.2' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.13' api 'org.apache.maven:maven-model:3.6.2' - api 'com.networknt:json-schema-validator:1.0.36' + api 'com.networknt:json-schema-validator:1.0.65' api "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.getProperty('jackson')}" - api 'org.ow2.asm:asm:9.0' - api 'org.ow2.asm:asm-tree:9.0' + api 'org.ow2.asm:asm:9.2' + api 'org.ow2.asm:asm-tree:9.2' api "org.apache.httpcomponents:httpclient:${versions.getProperty('httpclient')}" api "org.apache.httpcomponents:httpcore:${versions.getProperty('httpcore')}" compileOnly "com.puppycrawl.tools:checkstyle:${versions.getProperty('checkstyle')}" runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation "com.puppycrawl.tools:checkstyle:${versions.getProperty('checkstyle')}" -// testImplementation "junit:junit:${versions.getProperty('junit')}" testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.23.2' testImplementation 'org.mockito:mockito-core:1.9.5' testImplementation "org.hamcrest:hamcrest:${versions.getProperty('hamcrest')}" @@ -253,16 +263,18 @@ dependencies { because 'allows tests to run from IDEs that bundle older version of launcher' } - testImplementation platform("org.spockframework:spock-bom:2.0-M5-groovy-3.0") + testImplementation platform("org.spockframework:spock-bom:2.0-groovy-3.0") testImplementation("org.spockframework:spock-core") { exclude module: "groovy" } - integTestImplementation platform("org.spockframework:spock-bom:2.0-M5-groovy-3.0") + integTestImplementation platform("org.spockframework:spock-bom:2.0-groovy-3.0") integTestImplementation("org.spockframework:spock-core") { exclude module: "groovy" } // required as we rely on junit4 rules - integTestImplementation "org.spockframework:spock-junit4" + 
integTestImplementation("org.spockframework:spock-junit4") { + exclude module: "groovy" + } testImplementation "org.spockframework:spock-junit4" integTestImplementation "org.xmlunit:xmlunit-core:2.8.2" } @@ -279,3 +291,10 @@ tasks.register("integTest", Test) { } tasks.named("check").configure { dependsOn("integTest") } + +tasks.register("bootstrapPerformanceTests", Copy) { + from('performance') + into('build/performanceTests') + def root = file('..') + filter(ReplaceTokens, tokens: [testGitCommit:GitInfo.gitInfo(root).revision]) +} diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index fb04efa8e8848..7cec6af44e192 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=00b273629df4ce46e68df232161d5a7c4e495b9a029ce6e0420f071e21316867 +distributionSha256Sum=c9490e938b221daf0094982288e4038deed954a3f12fb54cbf270ddf4e37d879 diff --git a/build-tools-internal/performance/elasticsearch-build-tool-update.scenarios b/build-tools-internal/performance/elasticsearch-build-tool-update.scenarios new file mode 100644 index 0000000000000..27ff2443e7371 --- /dev/null +++ b/build-tools-internal/performance/elasticsearch-build-tool-update.scenarios @@ -0,0 +1,104 @@ +# Can specify scenarios to use when none are specified on the command line +default-scenarios = ["buildConfiguration_master", "buildConfiguration_branch", "single_project_master", "single_project_branch", "precommit_master", "precommit_branch"] + +buildConfiguration_branch { + title = "configuration phase (@testGitCommit@)" + tasks = ["help"] + gradle-args = ["--no-scan", "--no-build-cache"] + run-using = cli // value can be "cli" or "tooling-api" + daemon = warm // value can be "warm", "cold", or "none" + warm-ups = 5 + iterations = 10 + system-properties { + "BUILD_PERFORMANCE_TEST" = "true" + } + git-checkout = { + build = "@testGitCommit@" + } +} + +buildConfiguration_master { + title = "configuration phase (master)" + tasks = ["help"] + gradle-args = ["--no-scan", "--no-build-cache"] + run-using = cli // value can be "cli" or "tooling-api" + daemon = warm // value can be "warm", "cold", or "none" + warm-ups = 5 + iterations = 10 + system-properties { + "BUILD_PERFORMANCE_TEST" = "true" + } + git-checkout = { + build = "master" + } +} + +precommit_branch { + title = "precommit (@testGitCommit@)" + cleanup-tasks = ["clean"] + tasks = ["precommit"] + gradle-args = ["--no-scan", "--no-build-cache"] + run-using = cli // value can be "cli" or "tooling-api" + daemon = warm // value can be "warm", "cold", or "none" + warm-ups = 5 + iterations = 10 + system-properties { + "BUILD_PERFORMANCE_TEST" = "true" + } + git-checkout = { + build = "@testGitCommit@" + } +} + +precommit_master { + title = "precommit (master)" + cleanup-tasks = ["clean"] + tasks = ["precommit"] + gradle-args = ["--no-scan", "--no-build-cache"] + run-using = cli // value can be "cli" or "tooling-api" + daemon = warm // value can be "warm", "cold", or "none" + warm-ups = 5 + iterations = 10 + system-properties { + "BUILD_PERFORMANCE_TEST" = "true" + } + git-checkout = { + build = "master" + } 
+} + +single_project_branch { + title = "single project (@testGitCommit@)" + cleanup-tasks = [":server:clean"] + tasks = [":server:spotlessApply", ":server:precommit"] + gradle-args = ["--no-scan"] + apply-abi-change-to = "server/src/main/java/org/elasticsearch/Build.java" + run-using = cli // value can be "cli" or "tooling-api" + daemon = warm // value can be "warm", "cold", or "none" + warm-ups = 5 + iterations = 10 + system-properties { + "BUILD_PERFORMANCE_TEST" = "true" + } + git-checkout = { + build = "@testGitCommit@" + } +} + +single_project_master { + title = "single project (master)" + cleanup-tasks = [":server:clean"] + tasks = [":server:spotlessApply", ":server:precommit"] + gradle-args = ["--no-scan"] + apply-abi-change-to = "server/src/main/java/org/elasticsearch/Build.java" + run-using = cli // value can be "cli" or "tooling-api" + daemon = warm // value can be "warm", "cold", or "none" + warm-ups = 5 + iterations = 10 + system-properties { + "BUILD_PERFORMANCE_TEST" = "true" + } + git-checkout = { + build = "master" + } +} diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index 2e397c6e05ccf..2d1a6193189d7 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -18,7 +18,6 @@ import spock.lang.Unroll * Test is ignored on ARM since this test case tests the ability to build certain older BWC branches that we don't support on ARM */ -//@Ignore("https://github.com/elastic/elasticsearch/issues/79929") @IgnoreIf({ Architecture.current() == Architecture.AARCH64 }) class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest { diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion index b6a7d89c68e0c..98d9bcb75a685 100644 --- a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion +++ b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion @@ -1 +1 @@ -16 +17 diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle index 5d0716d5d9959..50db02d9e21a1 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle @@ -9,8 +9,8 @@ import java.nio.file.Files String buildNumber = System.getenv('BUILD_NUMBER') - -if (buildNumber) { +String performanceTest = System.getenv('BUILD_PERFORMANCE_TEST') +if (buildNumber && performanceTest == null) { File uploadFile = file("build/${buildNumber}.tar.bz2") project.gradle.buildFinished { result -> println "build complete, generating: $uploadFile" diff --git 
a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index f30cbe063d01b..84b35b9a7568c 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -88,7 +88,7 @@ if (providers.systemProperty('idea.active').forUseAtConfigurationTime().getOrNul tasks.register('buildDependencyArtifacts') { group = 'ide' description = 'Builds artifacts needed as dependency for IDE modules' - dependsOn ':client:rest-high-level:shadowJar', ':plugins:repository-hdfs:hadoop-client-api:shadowJar', ':plugins:repository-azure:azure-storage-blob:shadowJar' + dependsOn ':client:rest-high-level:shadowJar', ':plugins:repository-hdfs:hadoop-client-api:shadowJar', ':modules:repository-azure:azure-storage-blob:shadowJar' } idea { @@ -121,7 +121,6 @@ if (providers.systemProperty('idea.active').forUseAtConfigurationTime().getOrNul vmParameters = [ '-ea', '-Djava.locale.providers=SPI,COMPAT', - "--illegal-access=deny", // TODO: only open these for mockito when it is modularized '--add-opens=java.base/java.security.cert=ALL-UNNAMED', '--add-opens=java.base/java.nio.channels=ALL-UNNAMED', diff --git a/build-tools-internal/src/main/groovy/elasticsearch.run.gradle b/build-tools-internal/src/main/groovy/elasticsearch.run.gradle index d5e400be0f10e..4eb4cdcdc32d8 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.run.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.run.gradle @@ -25,9 +25,11 @@ testClusters.register("runTask") { } else if (licenseType != 'basic') { throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "[basic] or [trial].") } + // Not enabled by default in the build, otherwise all integration tests will download the databases from our geoip download service: + systemProperty 'ingest.geoip.downloader.enabled.default', 'true' setting 'xpack.security.enabled', 'true' keystore 'bootstrap.password', 'password' - user username: 'elastic-admin', password: 'elastic-password', role: 'superuser' + user username: 'elastic-admin', password: 'elastic-password', role: '_es_test_root' } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java index 7a57aabfe5f1b..b5e3ea074ca84 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java @@ -70,7 +70,7 @@ private TaskProvider createRunBwcGradleTask(Project project, String @Override public void execute(Task t) { // Execution time so that the checkouts are available - String compilerVersionInfoPath = minimumCompilerVersionPath(unreleasedVersionInfo.get().version); + String compilerVersionInfoPath = minimumCompilerVersionPath(unreleasedVersionInfo.get().version()); String minimumCompilerVersion = readFromFile(new File(checkoutDir.get(), compilerVersionInfoPath)); loggedExec.environment("JAVA_HOME", getJavaHome(Integer.parseInt(minimumCompilerVersion))); } @@ -108,8 +108,8 @@ public void execute(Task t) { if (project.getGradle().getStartParameter().isParallelProjectExecutionEnabled()) { loggedExec.args("--parallel"); } - loggedExec.setStandardOutput(new IndentingOutputStream(System.out, unreleasedVersionInfo.get().version)); - loggedExec.setErrorOutput(new 
IndentingOutputStream(System.err, unreleasedVersionInfo.get().version)); + loggedExec.setStandardOutput(new IndentingOutputStream(System.out, unreleasedVersionInfo.get().version())); + loggedExec.setErrorOutput(new IndentingOutputStream(System.err, unreleasedVersionInfo.get().version())); configAction.execute(loggedExec); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index f936913c79375..849db561ad5d3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; -import org.jetbrains.annotations.NotNull; import java.util.ArrayList; import java.util.Collections; @@ -18,7 +17,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; @@ -67,7 +65,7 @@ public class BwcVersions { private static final Pattern LINE_PATTERN = Pattern.compile( "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*?LUCENE_(\\d+)_(\\d+)_(\\d+)\\);" ); - private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.16.0"); + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); private final VersionPair currentVersion; private final List versions; @@ -236,11 +234,11 @@ public List getIndexCompatible() { ); } - public void withIndexCompatiple(BiConsumer versionAction) { + public void withIndexCompatible(BiConsumer versionAction) { getIndexCompatible().forEach(v -> versionAction.accept(v, "v" + v.toString())); } - public void withIndexCompatiple(Predicate filter, BiConsumer versionAction) { + public void withIndexCompatible(Predicate filter, BiConsumer versionAction) { getIndexCompatible().stream().filter(filter).forEach(v -> versionAction.accept(v, "v" + v.toString())); } @@ -250,11 +248,11 @@ public List getWireCompatible() { ); } - public void withWireCompatiple(BiConsumer versionAction) { + public void withWireCompatible(BiConsumer versionAction) { getWireCompatible().forEach(v -> versionAction.accept(v, "v" + v.toString())); } - public void withWireCompatiple(Predicate filter, BiConsumer versionAction) { + public void withWireCompatible(Predicate filter, BiConsumer versionAction) { getWireCompatible().stream().filter(filter).forEach(v -> versionAction.accept(v, "v" + v.toString())); } @@ -276,42 +274,16 @@ public List getUnreleasedWireCompatible() { return unmodifiableList(unreleasedWireCompatible); } - public static class UnreleasedVersionInfo { - public final Version version; - public final String branch; - public final String gradleProjectPath; - - public UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) { - this.version = version; - this.branch = branch; - this.gradleProjectPath = gradleProjectPath; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - UnreleasedVersionInfo that = (UnreleasedVersionInfo) o; - return version.equals(that.version) && branch.equals(that.branch) && gradleProjectPath.equals(that.gradleProjectPath); - } - - @Override - public 
int hashCode() { - return Objects.hash(version, branch, gradleProjectPath); - } + public Version getMinimumWireCompatibleVersion() { + return MINIMUM_WIRE_COMPATIBLE_VERSION; } - public static class VersionPair implements Comparable { - public final Version elasticsearch; - public final Version lucene; + public record UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) {} - public VersionPair(Version elasticsearch, Version lucene) { - this.elasticsearch = elasticsearch; - this.lucene = lucene; - } + public record VersionPair(Version elasticsearch, Version lucene) implements Comparable { @Override - public int compareTo(@NotNull VersionPair o) { + public int compareTo(VersionPair o) { // For ordering purposes, sort by Elasticsearch version return this.elasticsearch.compareTo(o.elasticsearch); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesGraphTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesGraphTask.java index c1c4dff63556c..3ffaad5cb5a1b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesGraphTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesGraphTask.java @@ -85,55 +85,46 @@ void generateDependenciesGraph() { for (final Dependency dependency : runtimeDependencies) { final String id = dependency.getGroup() + ":" + dependency.getName(); final String versionedId = id + "@" + dependency.getVersion(); - final StringBuilder packageString = new StringBuilder(); final StringBuilder nodeString = new StringBuilder(); if (dependency instanceof ProjectDependency) { continue; } - packageString.append("{\"id\": \"") - .append(versionedId) - .append("\",\"info\": {\"name\": \"") - .append(id) - .append("\",\"version\": \"") - .append(dependency.getVersion()) - .append("\"}}"); - packages.add(packageString.toString()); - nodeString.append("{\"nodeId\": \"") - .append(versionedId) - .append("\",\"pkgId\": \"") - .append(versionedId) - .append("\",\"deps\": []}"); + packages.add(""" + {"id": "%s","info": {"name": "%s","version": "%s"}}\ + """.formatted(versionedId, id, dependency.getVersion())); + nodeString.append(""" + {"nodeId": "%s","pkgId": "%s","deps": []}\ + """.formatted(versionedId, versionedId)); nodes.add(nodeString.toString()); - nodeIds.add("{\"nodeId\": \"" + versionedId + "\"}"); + nodeIds.add(""" + {"nodeId": "%s"}\ + """.formatted(versionedId)); } // We add one package and one node for each dependency, it suffices to check packages. 
if (packages.size() > 0) { final String projectName = "elastic/elasticsearch" + getProject().getPath(); - final StringBuilder output = new StringBuilder(); - output.append("{\"depGraph\": {\"schemaVersion\": \"1.2.0\",\"pkgManager\": {\"name\": \"gradle\"},\"pkgs\": [") - .append("{\"id\": \"") - .append(projectName) - .append("@0.0.0") - .append("\", \"info\": {\"name\": \"") - .append(projectName) - .append("\", \"version\": \"0.0.0\"}},") - .append(String.join(",", packages)) - .append("],\"graph\": {\"rootNodeId\": \"") - .append(projectName) - .append("@0.0.0") - .append("\",\"nodes\": [") - .append("{\"nodeId\": \"") - .append(projectName) - .append("@0.0.0") - .append("\",\"pkgId\": \"") - .append(projectName) - .append("@0.0.0") - .append("\",\"deps\": [") - .append(String.join(",", nodeIds)) - .append("]},") - .append(String.join(",", nodes)) - .append("]}}}"); - getLogger().debug("Dependency Graph: " + output.toString()); + final String output = """ + { + "depGraph": { + "schemaVersion": "1.2.0", + "pkgManager": {"name": "gradle"}, + "pkgs": [ + { + "id": "%s@0.0.0", + "info": {"name": "%1$s", "version": "0.0.0"} + }, + %s + ], + "graph": { + "rootNodeId": "%1$s@0.0.0", + "nodes": [ + { "nodeId": "%1$s@0.0.0","pkgId": "%1$s@0.0.0","deps": [%s] }, + %s + ] + } + } + }""".formatted(projectName, String.join(",", packages), String.join(",", nodeIds), String.join(",", nodes)); + getLogger().debug("Dependency Graph: " + output); try (CloseableHttpClient client = HttpClients.createDefault()) { HttpPost postRequest = new HttpPost(url); postRequest.addHeader("Authorization", "token " + token); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesInfoTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesInfoTask.java index 70c4866e09963..6d4a4906d90ff 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesInfoTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DependenciesInfoTask.java @@ -181,20 +181,20 @@ protected String getLicenseType(final String group, final String name) throws IO String licenseType; final LicenseAnalyzer.LicenseInfo licenseInfo = LicenseAnalyzer.licenseType(license); - if (licenseInfo.isSpdxLicense() == false) { + if (licenseInfo.spdxLicense() == false) { // License has not been identified as SPDX. // As we have the license file, we create a Custom entry with the URL to this license file.
final String gitBranch = System.getProperty("build.branch", "master"); final String githubBaseURL = "https://raw.githubusercontent.com/elastic/elasticsearch/" + gitBranch + "/"; - licenseType = licenseInfo.getIdentifier() + licenseType = licenseInfo.identifier() + ";" + license.getCanonicalPath().replaceFirst(".*/elasticsearch/", githubBaseURL) + ","; } else { - licenseType = licenseInfo.getIdentifier() + ","; + licenseType = licenseInfo.identifier() + ","; } - if (licenseInfo.isSourceRedistributionRequired()) { + if (licenseInfo.sourceRedistributionRequired()) { final File sources = getDependencyInfoFile(group, name, "SOURCES"); licenseType += Files.readString(sources.toPath()).trim(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 3a023b9d99b95..4e68cfb675b12 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -123,7 +123,7 @@ public static void configureCompile(Project project) { // TODO Discuss moving compileOptions.getCompilerArgs() to use provider api with Gradle team. List compilerArgs = compileOptions.getCompilerArgs(); compilerArgs.add("-Werror"); - compilerArgs.add("-Xlint:all,-path,-serial,-options,-deprecation,-try"); + compilerArgs.add("-Xlint:all,-path,-serial,-options,-deprecation,-try,-removal"); compilerArgs.add("-Xdoclint:all"); compilerArgs.add("-Xdoclint:-missing"); compileOptions.setEncoding("UTF-8"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 840a35b4dea6c..c33545ada9938 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -96,7 +96,7 @@ public void execute(Task t) { test.jvmArgs( "-Xmx" + System.getProperty("tests.heap.size", "512m"), "-Xms" + System.getProperty("tests.heap.size", "512m"), - "--illegal-access=deny", + "-Djava.security.manager=allow", // TODO: only open these for mockito when it is modularized "--add-opens=java.base/java.security.cert=ALL-UNNAMED", "--add-opens=java.base/java.nio.channels=ALL-UNNAMED", @@ -187,7 +187,7 @@ public void execute(Task t) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); FileCollection mainRuntime = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME).getRuntimeClasspath(); // Add any "shadow" dependencies. 
These are dependencies that are *not* bundled into the shadow JAR - Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.getCONFIGURATION_NAME()); + Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.CONFIGURATION_NAME); // Add the shadow JAR artifact itself FileCollection shadowJar = project.files(project.getTasks().named("shadowJar")); FileCollection testRuntime = sourceSets.getByName(SourceSet.TEST_SOURCE_SET_NAME).getRuntimeClasspath(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index a41ce13daf24f..7eeff518e21ce 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -59,7 +59,7 @@ public void apply(Project project) { BuildParams.getBwcVersions() .forPreviousUnreleased( (BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { - configureBwcProject(project.project(unreleasedVersion.gradleProjectPath), unreleasedVersion, bwcTaskThrottleProvider); + configureBwcProject(project.project(unreleasedVersion.gradleProjectPath()), unreleasedVersion, bwcTaskThrottleProvider); } ); } @@ -70,13 +70,13 @@ private void configureBwcProject( Provider bwcTaskThrottleProvider ) { Provider versionInfoProvider = providerFactory.provider(() -> versionInfo); - Provider checkoutDir = versionInfoProvider.map(info -> new File(project.getBuildDir(), "bwc/checkout-" + info.branch)); + Provider checkoutDir = versionInfoProvider.map(info -> new File(project.getBuildDir(), "bwc/checkout-" + info.branch())); BwcSetupExtension bwcSetupExtension = project.getExtensions() .create("bwcSetup", BwcSetupExtension.class, project, versionInfoProvider, bwcTaskThrottleProvider, checkoutDir); BwcGitExtension gitExtension = project.getPlugins().apply(InternalBwcGitPlugin.class).getGitExtension(); - Provider bwcVersion = versionInfoProvider.map(info -> info.version); - gitExtension.setBwcVersion(versionInfoProvider.map(info -> info.version)); - gitExtension.setBwcBranch(versionInfoProvider.map(info -> info.branch)); + Provider bwcVersion = versionInfoProvider.map(info -> info.version()); + gitExtension.setBwcVersion(versionInfoProvider.map(info -> info.version())); + gitExtension.setBwcBranch(versionInfoProvider.map(info -> info.branch())); gitExtension.getCheckoutDir().set(checkoutDir); // we want basic lifecycle tasks like `clean` here. 
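The record conversions above (UnreleasedVersionInfo, VersionPair in BwcVersions, and the matching call-site updates in this plugin) all follow one Java 16+ pattern; the minimal, self-contained sketch below may help clarify it. The class name and values here are illustrative, not the actual Elasticsearch types:

record UnreleasedVersionInfo(String version, String branch, String gradleProjectPath) {

    public static void main(String[] args) {
        // Illustrative values only.
        UnreleasedVersionInfo info = new UnreleasedVersionInfo("7.17.0", "7.17", ":distribution:bwc:staged");

        // A record exposes its components through generated accessor methods, which is
        // why call sites in this diff change from `info.version` to `info.version()`.
        System.out.println(info.version() + " lives on branch " + info.branch());

        // equals(), hashCode() and toString() are generated as well, which is why the
        // hand-written implementations are deleted from BwcVersions.
    }
}
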
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index df88e1b86035d..6cb8334ff4f00 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -50,7 +50,7 @@ public void apply(Project project) { DockerSupportPlugin.DOCKER_SUPPORT_SERVICE_NAME ); distributionDownloadPlugin.setDockerAvailability( - dockerSupport.map(dockerSupportService -> dockerSupportService.getDockerAvailability().isAvailable) + dockerSupport.map(dockerSupportService -> dockerSupportService.getDockerAvailability().isAvailable()) ); registerInternalDistributionResolutions(DistributionDownloadPlugin.getRegistrationsContainer(project)); } @@ -88,7 +88,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer< } String projectConfig = getProjectConfig(distribution, unreleasedInfo); return new ProjectBasedDistributionDependency( - (config) -> projectDependency(project, unreleasedInfo.gradleProjectPath, projectConfig) + (config) -> projectDependency(project, unreleasedInfo.gradleProjectPath(), projectConfig) ); } return null; @@ -107,7 +107,7 @@ private boolean isCurrentVersion(ElasticsearchDistribution distribution) { private static String getProjectConfig(ElasticsearchDistribution distribution, BwcVersions.UnreleasedVersionInfo info) { String distributionProjectName = distributionProjectName(distribution); if (distribution.getType().shouldExtract()) { - return (info.gradleProjectPath.equals(":distribution") || info.version.before("7.10.0")) + return (info.gradleProjectPath().equals(":distribution") || info.version().before("7.10.0")) ? 
distributionProjectName : "expanded-" + distributionProjectName; } else { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java index 214ce9ecaa0a9..d86ec9001d415 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java @@ -14,7 +14,6 @@ import org.gradle.api.model.ObjectFactory; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.TaskAction; -import org.gradle.internal.deprecation.DeprecatableConfiguration; import java.util.Collection; import java.util.stream.Collectors; @@ -50,8 +49,7 @@ private static boolean canBeResolved(Configuration configuration) { if (configuration.isCanBeResolved() == false) { return false; } - if (configuration instanceof org.gradle.internal.deprecation.DeprecatableConfiguration) { - var deprecatableConfiguration = (DeprecatableConfiguration) configuration; + if (configuration instanceof org.gradle.internal.deprecation.DeprecatableConfiguration deprecatableConfiguration) { if (deprecatableConfiguration.canSafelyBeResolved() == false) { return false; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTar.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTar.java index 55d325fbde2c4..29c7dfd422547 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTar.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTar.java @@ -44,18 +44,11 @@ public class SymbolicLinkPreservingTar extends Tar { @Override protected CopyAction createCopyAction() { - final ArchiveOutputStreamFactory compressor; - switch (getCompression()) { - case BZIP2: - compressor = Bzip2Archiver.getCompressor(); - break; - case GZIP: - compressor = GzipArchiver.getCompressor(); - break; - default: - compressor = new SimpleCompressor(); - break; - } + final ArchiveOutputStreamFactory compressor = switch (getCompression()) { + case BZIP2 -> Bzip2Archiver.getCompressor(); + case GZIP -> GzipArchiver.getCompressor(); + default -> new SimpleCompressor(); + }; return new SymbolicLinkPreservingTarCopyAction(getArchiveFile(), compressor, isPreserveFileTimestamps()); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java index 552b3bec8674c..4d78a0a7c36d1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java @@ -154,8 +154,11 @@ void failIfDockerUnavailable(List tasks) { // Some other problem, print the error final String message = String.format( Locale.ROOT, - "a problem occurred while using Docker from [%s]%s yet it is required to run the following task%s: \n%s\n" - + "the problem is that Docker exited with exit code [%d] with standard error output:\n%s", + """ + a problem occurred while using Docker from [%s]%s yet it is required to run the following task%s: + %s + the problem is that Docker exited with exit code [%d] with standard error output: + %s""", 
availability.path, availability.version == null ? "" : " v" + availability.version, tasks.size() > 1 ? "s" : "", @@ -305,8 +308,8 @@ private Result runCommand(String... args) { /** * An immutable class that represents the results of a Docker search from {@link #getDockerAvailability()}}. */ - public static class DockerAvailability { - /** + public record DockerAvailability( + /* * Indicates whether Docker is available and meets the required criteria. * True if, and only if, Docker is: *

@@ -316,71 +319,32 @@ public static class DockerAvailability { * <ul> * <li>Can execute a command that requires privileges</li> * </ul>
*/ - public final boolean isAvailable; + boolean isAvailable, - /** - * True if docker-compose is available. - */ - public final boolean isComposeAvailable; + // True if docker-compose is available. + boolean isComposeAvailable, - /** - * True if the installed Docker version is >= 17.05 - */ - public final boolean isVersionHighEnough; + // True if the installed Docker version is >= 17.05 + boolean isVersionHighEnough, - /** - * The path to the Docker CLI, or null - */ - public final String path; + // The path to the Docker CLI, or null + String path, - /** - * The installed Docker version, or null - */ - public final Version version; + // The installed Docker version, or null + Version version, - /** - * Information about the last command executes while probing Docker, or null. - */ - final Result lastCommand; - - DockerAvailability( - boolean isAvailable, - boolean isComposeAvailable, - boolean isVersionHighEnough, - String path, - Version version, - Result lastCommand - ) { - this.isAvailable = isAvailable; - this.isComposeAvailable = isComposeAvailable; - this.isVersionHighEnough = isVersionHighEnough; - this.path = path; - this.version = version; - this.lastCommand = lastCommand; - } - } + // Information about the last command executed while probing Docker, or null. + Result lastCommand + ) {} /** * This class models the result of running a command. It captures the exit code, standard output and standard error. */ - private static class Result { - final int exitCode; - final String stdout; - final String stderr; - - Result(int exitCode, String stdout, String stderr) { - this.exitCode = exitCode; - this.stdout = stdout; - this.stderr = stderr; - } + private record Result(int exitCode, String stdout, String stderr) { boolean isSuccess() { return exitCode == 0; } - - public String toString() { - return "exitCode = [" + exitCode + "] " + "stdout = [" + stdout.trim() + "] " + "stderr = [" + stderr.trim() + "]"; - } } interface Parameters extends BuildServiceParameters { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilter.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilter.java index a8aa68641fd37..4be9380388a74 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilter.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilter.java @@ -87,23 +87,17 @@ static List<String> transformConfig(List<String> lines) { } switch (keyParts[2]) { - case "type": + case "type" -> { if (value.equals("RollingFile")) { value = "Console"; } line = key + " = " + value; - break; - - case "fileName": - case "filePattern": - case "policies": - case "strategy": + } + case "fileName", "filePattern", "policies", "strategy" -> { // No longer applicable. Omit it.
skipNext = line.endsWith("\\"); continue; - - default: - break; + } } } else if (line.startsWith("rootLogger.appenderRef")) { String[] parts = line.split("\\s*=\\s*"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java index f338f0b55d5e5..4d1b0c76598de 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java @@ -266,7 +266,7 @@ private void checkDependencies( File licenseFile = new File(licensesDir, getFileName(dependencyName, licenses, "LICENSE")); LicenseInfo licenseInfo = LicenseAnalyzer.licenseType(licenseFile); - if (licenseInfo.isSourceRedistributionRequired()) { + if (licenseInfo.sourceRedistributionRequired()) { checkFile(dependencyName, jarName, sources, "SOURCES"); } } @@ -318,16 +318,11 @@ private void checkSha(File jar, String jarName, Set shaFiles) throws NoSuc String sha = getSha1(jar); if (expectedSha.equals(sha) == false) { - final String exceptionMessage = String.format( - Locale.ROOT, - "SHA has changed! Expected %s for %s but got %s." - + "\nThis usually indicates a corrupt dependency cache or artifacts changed upstream." - + "\nEither wipe your cache, fix the upstream artifact, or delete %s and run updateShas", - expectedSha, - jarName, - sha, - shaFile - ); + final String exceptionMessage = String.format(Locale.ROOT, """ + SHA has changed! Expected %s for %s but got %s. + This usually indicates a corrupt dependency cache or artifacts changed upstream. + Either wipe your cache, fix the upstream artifact, or delete %s and run updateShas + """, expectedSha, jarName, sha, shaFile); throw new GradleException(exceptionMessage); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java index 163e9f1e13d07..10efa35695cd4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java @@ -17,7 +17,6 @@ import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.util.GradleUtils; -import org.gradle.api.JavaVersion; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.plugins.ExtraPropertiesExtension; @@ -41,6 +40,7 @@ public TaskProvider createTask(Project project) { resourcesTask.configure(t -> { t.setOutputDir(resourcesDir.toFile()); t.copy("forbidden/jdk-signatures.txt"); + t.copy("forbidden/jdk-deprecated.txt"); t.copy("forbidden/es-all-signatures.txt"); t.copy("forbidden/es-test-signatures.txt"); t.copy("forbidden/http-signatures.txt"); @@ -65,14 +65,14 @@ public TaskProvider createTask(Project project) { SourceSet sourceSet = sourceSets.getByName(sourceSetName); t.setClasspath(project.files(sourceSet.getRuntimeClasspath()).plus(sourceSet.getCompileClasspath())); - t.setTargetCompatibility(BuildParams.getRuntimeJavaVersion().getMajorVersion()); - if (BuildParams.getRuntimeJavaVersion().compareTo(JavaVersion.VERSION_14) > 0) { - // 
TODO: forbidden apis does not yet support java 15, rethink using runtime version - t.setTargetCompatibility(JavaVersion.VERSION_14.getMajorVersion()); - } - t.setBundledSignatures(Set.of("jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out")); + t.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion().getMajorVersion()); + t.setBundledSignatures(Set.of("jdk-unsafe", "jdk-non-portable", "jdk-system-out")); t.setSignaturesFiles( - project.files(resourcesDir.resolve("forbidden/jdk-signatures.txt"), resourcesDir.resolve("forbidden/es-all-signatures.txt")) + project.files( + resourcesDir.resolve("forbidden/jdk-signatures.txt"), + resourcesDir.resolve("forbidden/es-all-signatures.txt"), + resourcesDir.resolve("forbidden/jdk-deprecated.txt") + ) ); t.setSuppressAnnotations(Set.of("**.SuppressForbidden")); if (t.getName().endsWith("Test")) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java index df61f2f84a7fa..c52ea9aaeb6f5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java @@ -22,68 +22,55 @@ public class LicenseAnalyzer { */ private static final LicenseMatcher[] matchers = new LicenseMatcher[] { new LicenseMatcher("Apache-2.0", true, false, Pattern.compile("Apache.*License.*[vV]ersion.*2\\.0", Pattern.DOTALL)), - new LicenseMatcher( - "BSD-2-Clause", - true, - false, - Pattern.compile( - ("Redistribution and use in source and binary forms, with or without\n" - + "modification, are permitted provided that the following conditions\n" - + "are met:\n" - + "\n" - + " 1\\. Redistributions of source code must retain the above copyright\n" - + " notice, this list of conditions and the following disclaimer\\.\n" - + " 2\\. Redistributions in binary form must reproduce the above copyright\n" - + " notice, this list of conditions and the following disclaimer in the\n" - + " documentation and/or other materials provided with the distribution\\.\n" - + "\n" - + "THIS SOFTWARE IS PROVIDED BY .+ (``|''|\")AS IS(''|\") AND ANY EXPRESS OR\n" - + "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n" - + "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED\\.\n" - + "IN NO EVENT SHALL .+ BE LIABLE FOR ANY DIRECT, INDIRECT,\n" - + "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \\(INCLUDING, BUT\n" - + "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n" - + "DATA, OR PROFITS; OR BUSINESS INTERRUPTION\\) HOWEVER CAUSED AND ON ANY\n" - + "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n" - + "\\(INCLUDING NEGLIGENCE OR OTHERWISE\\) ARISING IN ANY WAY OUT OF THE USE OF\n" - + "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\\.").replaceAll("\\s+", "\\\\s*"), - Pattern.DOTALL - ) - ), - new LicenseMatcher( - "BSD-3-Clause", - true, - false, - Pattern.compile( - ("\n" - + "Redistribution and use in source and binary forms, with or without\n" - + "modification, are permitted provided that the following conditions\n" - + "are met:\n" - + "\n" - + " (1\\.)? Redistributions of source code must retain the above copyright\n" - + " notice, this list of conditions and the following disclaimer\\.\n" - + " (2\\.)? 
Redistributions in binary form must reproduce the above copyright\n" - + " notice, this list of conditions and the following disclaimer in the\n" - + " documentation and/or other materials provided with the distribution\\.\n" - + " ((3\\.)? The name of .+ may not be used to endorse or promote products\n" - + " derived from this software without specific prior written permission\\.|\n" - + " (3\\.)? Neither the name of .+ nor the names of its\n" - + " contributors may be used to endorse or promote products derived from\n" - + " this software without specific prior written permission\\.)\n" - + "\n" - + "THIS SOFTWARE IS PROVIDED BY .+ (``|''|\")AS IS(''|\") AND ANY EXPRESS OR\n" - + "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n" - + "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED\\.\n" - + "IN NO EVENT SHALL .+ BE LIABLE FOR ANY DIRECT, INDIRECT,\n" - + "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \\(INCLUDING, BUT\n" - + "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n" - + "DATA, OR PROFITS; OR BUSINESS INTERRUPTION\\) HOWEVER CAUSED AND ON ANY\n" - + "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n" - + "\\(INCLUDING NEGLIGENCE OR OTHERWISE\\) ARISING IN ANY WAY OUT OF THE USE OF\n" - + "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\\.\n").replaceAll("\\s+", "\\\\s*"), - Pattern.DOTALL - ) - ), + new LicenseMatcher("BSD-2-Clause", true, false, Pattern.compile((""" + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1\\. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer\\. + 2\\. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution\\. + + THIS SOFTWARE IS PROVIDED BY .+ (``|''|")AS IS(''|") AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED\\. + IN NO EVENT SHALL .+ BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \\(INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION\\) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + \\(INCLUDING NEGLIGENCE OR OTHERWISE\\) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\\.""").replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)), + new LicenseMatcher("BSD-3-Clause", true, false, Pattern.compile((""" + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + (1\\.)? Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer\\. + (2\\.)? Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution\\. + ((3\\.)? The name of .+ may not be used to endorse or promote products + derived from this software without specific prior written permission\\.| + (3\\.)? 
Neither the name of .+ nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission\\.) + + THIS SOFTWARE IS PROVIDED BY .+ (``|''|")AS IS(''|") AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED\\. + IN NO EVENT SHALL .+ BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \\(INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION\\) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + \\(INCLUDING NEGLIGENCE OR OTHERWISE\\) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\\. + """).replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)), new LicenseMatcher( "CDDL-1.0", true, @@ -97,51 +84,40 @@ public class LicenseAnalyzer { Pattern.compile("COMMON DEVELOPMENT AND DISTRIBUTION LICENSE.*Version 1.1", Pattern.DOTALL) ), new LicenseMatcher("ICU", true, false, Pattern.compile("ICU License - ICU 1.8.1 and later", Pattern.DOTALL)), - new LicenseMatcher( - "MIT", - true, - false, - Pattern.compile( - ("\n" - + "Permission is hereby granted, free of charge, to any person obtaining a copy of\n" - + "this software and associated documentation files \\(the \"Software\"\\), to deal in\n" - + "the Software without restriction, including without limitation the rights to\n" - + "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n" - + "of the Software, and to permit persons to whom the Software is furnished to do\n" - + "so, subject to the following conditions:\n" - + "\n" - + "The above copyright notice and this permission notice shall be included in all\n" - + "copies or substantial portions of the Software\\.\n" - + "\n" - + "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n" - + "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n" - + "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT\\. IN NO EVENT SHALL THE\n" - + "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n" - + "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n" - + "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n" - + "SOFTWARE\\.\n").replaceAll("\\s+", "\\\\s*"), - Pattern.DOTALL - ) - ), + new LicenseMatcher("MIT", true, false, Pattern.compile((""" + + Permission is hereby granted, free of charge, to any person obtaining a copy of + this software and associated documentation files \\(the "Software"\\), to deal in + the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is furnished to do + so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software\\. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT\\. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE\\. + """).replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)), new LicenseMatcher( "MIT-0", true, false, Pattern.compile( - ("MIT No Attribution\n" - + "Copyright .+\n" - + "\n" - + "Permission is hereby granted, free of charge, to any person obtaining a copy of " - + "this software and associated documentation files \\(the \"Software\"\\), to deal in the Software without " - + "restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, " - + "and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.\n" - + "\n" - + "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, " - + "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND " - + "NONINFRINGEMENT\\. IN NO EVENT SHALL THE AUTHORS OR " - + "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR " - + "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n") - .replaceAll("\\s+", "\\\\s*"), + (""" + MIT No Attribution + Copyright .+ + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files \\(the "Software"\\), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT\\. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + """) + .replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL ) ), @@ -152,17 +128,10 @@ public class LicenseAnalyzer { new LicenseMatcher("EDL-1.0", true, false, Pattern.compile("Eclipse Distribution License - v 1.0", Pattern.DOTALL)), new LicenseMatcher("LGPL-2.1", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 2.1", Pattern.DOTALL)), new LicenseMatcher("LGPL-3.0", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 3", Pattern.DOTALL)), - new LicenseMatcher( - "GeoLite", - false, - false, - Pattern.compile( - ("The Elastic GeoIP Database Service uses the GeoLite2 Data created " - + "and licensed by MaxMind,\nwhich is governed by MaxMind’s GeoLite2 End User License Agreement, " - + "available at https://www.maxmind.com/en/geolite2/eula.\n").replaceAll("\\s+", "\\\\s*"), - Pattern.DOTALL - ) - ), + new LicenseMatcher("GeoLite", false, false, Pattern.compile((""" + The Elastic GeoIP Database Service uses the GeoLite2 Data created and licensed by MaxMind, + which is governed by MaxMind’s GeoLite2 End User License Agreement, available at https://www.maxmind.com/en/geolite2/eula. 
+ """).replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)), new LicenseMatcher( "GeoIp-Database-Service", false, @@ -178,53 +147,16 @@ public static LicenseInfo licenseType(File licenseFile) { for (LicenseMatcher matcher : matchers) { boolean matches = matcher.matches(licenseFile); if (matches) { - return new LicenseInfo(matcher.getIdentifier(), matcher.spdxLicense, matcher.sourceRedistributionRequired); + return new LicenseInfo(matcher.identifier(), matcher.spdxLicense, matcher.sourceRedistributionRequired); } } throw new IllegalStateException("Unknown license for license file: " + licenseFile); } - public static class LicenseInfo { - private final String identifier; - private final boolean spdxLicense; - private final boolean sourceRedistributionRequired; + public record LicenseInfo(String identifier, boolean spdxLicense, boolean sourceRedistributionRequired) {} - public LicenseInfo(String identifier, boolean spdxLicense, boolean sourceRedistributionRequired) { - this.identifier = identifier; - this.spdxLicense = spdxLicense; - this.sourceRedistributionRequired = sourceRedistributionRequired; - } - - public String getIdentifier() { - return identifier; - } - - public boolean isSpdxLicense() { - return spdxLicense; - } - - public boolean isSourceRedistributionRequired() { - return sourceRedistributionRequired; - } - } - - private static class LicenseMatcher { - private final String identifier; - private final boolean spdxLicense; - private final boolean sourceRedistributionRequired; - private final Pattern pattern; - - LicenseMatcher(String identifier, boolean spdxLicense, boolean sourceRedistributionRequired, Pattern pattern) { - this.identifier = identifier; - this.spdxLicense = spdxLicense; - this.sourceRedistributionRequired = sourceRedistributionRequired; - this.pattern = pattern; - } - - public String getIdentifier() { - return identifier; - } + private record LicenseMatcher(String identifier, boolean spdxLicense, boolean sourceRedistributionRequired, Pattern pattern) { public boolean matches(File licenseFile) { try { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index 380ab863c8584..ca99744ea18b0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -30,7 +30,7 @@ public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin implements I public TaskProvider createTask(Project project) { project.getPlugins().apply(CompileOnlyResolvePlugin.class); project.getConfigurations().create("forbiddenApisCliJar"); - project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.1"); + project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.2"); Configuration jdkJarHellConfig = project.getConfigurations().create(JDK_JAR_HELL_CONFIG_NAME); if (project.getPath().equals(LIBS_ELASTICSEARCH_CORE_PROJECT_PATH) == false) { // Internal projects are not all plugins, so make sure the check is available diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java index fc33c288cf944..e98b338e60891 100644 
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java @@ -11,6 +11,7 @@ import com.google.common.annotations.VisibleForTesting; import org.elasticsearch.gradle.VersionProperties; +import org.gradle.api.GradleException; import java.io.File; import java.io.FileWriter; @@ -18,56 +19,141 @@ import java.nio.file.Files; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.stream.Collectors; import static java.util.Comparator.comparing; import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toCollection; /** - * Generates the page that lists the breaking changes and deprecations for a minor version release. + * Generates the page that contains an index into the breaking changes and lists deprecations for a minor version release, + * and the individual pages for each breaking area. */ public class BreakingChangesGenerator { - static void update(File templateFile, File outputFile, List entries) throws IOException { - try (FileWriter output = new FileWriter(outputFile)) { + // Needs to match `changelog-schema.json` + private static final List BREAKING_AREAS = List.of( + "Cluster and node setting", + "Command line tool", + "Index setting", + "JVM option", + "Java API", + "Logging", + "Mapping", + "Packaging", + "Painless", + "REST API", + "System requirement", + "Transform" + ); + + static void update( + File indexTemplateFile, + File indexOutputFile, + File outputDirectory, + File areaTemplateFile, + List entries + ) throws IOException { + if (outputDirectory.exists()) { + if (outputDirectory.isDirectory() == false) { + throw new GradleException("Path [" + outputDirectory + "] exists but isn't a directory!"); + } + } else { + Files.createDirectory(outputDirectory.toPath()); + } + + try (FileWriter output = new FileWriter(indexOutputFile)) { output.write( - generateFile(QualifiedVersion.of(VersionProperties.getElasticsearch()), Files.readString(templateFile.toPath()), entries) + generateIndexFile( + QualifiedVersion.of(VersionProperties.getElasticsearch()), + Files.readString(indexTemplateFile.toPath()), + entries + ) ); } - } - @VisibleForTesting - static String generateFile(QualifiedVersion version, String template, List entries) throws IOException { + String areaTemplate = Files.readString(areaTemplateFile.toPath()); - final Map>> breakingChangesByNotabilityByArea = entries.stream() - .map(ChangelogEntry::getBreaking) - .filter(Objects::nonNull) - .sorted(comparing(ChangelogEntry.Breaking::getTitle)) - .collect( - groupingBy( - ChangelogEntry.Breaking::isNotable, - groupingBy(ChangelogEntry.Breaking::getArea, TreeMap::new, Collectors.toList()) - ) - ); + for (String breakingArea : BREAKING_AREAS) { + final List entriesForArea = entries.stream() + .map(ChangelogEntry::getBreaking) + .filter(entry -> entry != null && breakingArea.equals(entry.getArea())) + .collect(Collectors.toList()); + if (entriesForArea.isEmpty()) { + continue; + } + + final String outputFilename = breakingArea.toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "-") + + "-changes.asciidoc"; + + try (FileWriter output = new FileWriter(outputDirectory.toPath().resolve(outputFilename).toFile())) { + output.write( + generateBreakingAreaFile( + 
QualifiedVersion.of(VersionProperties.getElasticsearch()), + areaTemplate, + breakingArea, + entriesForArea + ) + ); + } + } + } + + @VisibleForTesting + static String generateIndexFile(QualifiedVersion version, String template, List entries) throws IOException { final Map> deprecationsByArea = entries.stream() .map(ChangelogEntry::getDeprecation) .filter(Objects::nonNull) .sorted(comparing(ChangelogEntry.Deprecation::getTitle)) .collect(groupingBy(ChangelogEntry.Deprecation::getArea, TreeMap::new, Collectors.toList())); + final List breakingIncludeList = entries.stream() + .filter(each -> each.getBreaking() != null) + .map(each -> each.getBreaking().getArea().toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "-")) + .distinct() + .sorted() + .toList(); + final Map bindings = new HashMap<>(); - bindings.put("breakingChangesByNotabilityByArea", breakingChangesByNotabilityByArea); + bindings.put("breakingIncludeList", breakingIncludeList); bindings.put("deprecationsByArea", deprecationsByArea); bindings.put("isElasticsearchSnapshot", version.isSnapshot()); - bindings.put("majorDotMinor", version.getMajor() + "." + version.getMinor()); - bindings.put("majorMinor", String.valueOf(version.getMajor()) + version.getMinor()); - bindings.put("nextMajor", (version.getMajor() + 1) + ".0"); + bindings.put("majorDotMinor", version.major() + "." + version.minor()); + bindings.put("majorMinor", String.valueOf(version.major()) + version.minor()); + bindings.put("nextMajor", (version.major() + 1) + ".0"); bindings.put("version", version); return TemplateUtils.render(template, bindings); } + + @VisibleForTesting + static String generateBreakingAreaFile( + QualifiedVersion version, + String template, + String breakingArea, + List entriesForArea + ) throws IOException { + final Map> breakingEntriesByNotability = entriesForArea.stream() + .collect( + groupingBy( + ChangelogEntry.Breaking::isNotable, + toCollection(() -> new TreeSet<>(comparing(ChangelogEntry.Breaking::getTitle))) + ) + ); + + final Map bindings = new HashMap<>(); + bindings.put("breakingArea", breakingArea); + bindings.put("breakingEntriesByNotability", breakingEntriesByNotability); + bindings.put("breakingAreaAnchor", breakingArea.toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "_")); + bindings.put("majorMinor", String.valueOf(version.major()) + version.minor()); + + return TemplateUtils.render(template, bindings); + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java index 19b9ed2f274a4..94c77768b14b0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java @@ -215,6 +215,7 @@ public static class Breaking { private String details; private String impact; private boolean notable; + private boolean essSettingChange; public String getArea() { return area; @@ -260,6 +261,14 @@ public String getAnchor() { return generatedAnchor(this.title); } + public boolean isEssSettingChange() { + return essSettingChange; + } + + public void setEssSettingChange(boolean essSettingChange) { + this.essSettingChange = essSettingChange; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -273,23 +282,25 @@ public boolean equals(Object o) { && Objects.equals(area, breaking.area) && Objects.equals(title, 
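As a worked example of the output filename derivation in generateBreakingAreaFile above, this sketch applies the exact string transform from the diff to one of the BREAKING_AREAS values:

import java.util.Locale;

public class BreakingAreaFilenameSketch {
    public static void main(String[] args) {
        String breakingArea = "Cluster and node setting";
        // Lower-case, drop the first " and", then hyphenate the remaining spaces,
        // exactly as BreakingChangesGenerator does when naming the per-area file.
        String outputFilename = breakingArea.toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "-")
            + "-changes.asciidoc";
        System.out.println(outputFilename); // cluster-node-setting-changes.asciidoc
    }
}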
breaking.title) && Objects.equals(details, breaking.details) - && Objects.equals(impact, breaking.impact); + && Objects.equals(impact, breaking.impact) + && Objects.equals(essSettingChange, breaking.essSettingChange); } @Override public int hashCode() { - return Objects.hash(area, title, details, impact, notable); + return Objects.hash(area, title, details, impact, notable, essSettingChange); } @Override public String toString() { return String.format( - "Breaking{area='%s', title='%s', details='%s', impact='%s', isNotable=%s}", + "Breaking{area='%s', title='%s', details='%s', impact='%s', notable=%s, essSettingChange=%s}", area, title, details, impact, - notable + notable, + essSettingChange ); } } @@ -351,7 +362,7 @@ public String toString() { } private static String generatedAnchor(String input) { - final List excludes = List.of("the", "is", "a"); + final List excludes = List.of("the", "is", "a", "and"); final String[] words = input.toLowerCase(Locale.ROOT) .replaceAll("[^\\w]+", "_") diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java index 70fafc303bcd3..7c49ab10c8beb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java @@ -14,6 +14,8 @@ import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.Directory; +import org.gradle.api.file.DirectoryProperty; import org.gradle.api.file.FileCollection; import org.gradle.api.file.RegularFile; import org.gradle.api.file.RegularFileProperty; @@ -22,6 +24,7 @@ import org.gradle.api.model.ObjectFactory; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.TaskAction; import org.gradle.process.ExecOperations; @@ -55,11 +58,13 @@ public class GenerateReleaseNotesTask extends DefaultTask { private final RegularFileProperty releaseNotesTemplate; private final RegularFileProperty releaseHighlightsTemplate; private final RegularFileProperty breakingChangesTemplate; + private final RegularFileProperty breakingChangesAreaTemplate; private final RegularFileProperty releaseNotesIndexFile; private final RegularFileProperty releaseNotesFile; private final RegularFileProperty releaseHighlightsFile; - private final RegularFileProperty breakingChangesFile; + private final RegularFileProperty breakingChangesIndexFile; + private final DirectoryProperty breakingChangesDirectory; private final GitWrapper gitWrapper; @@ -71,11 +76,13 @@ public GenerateReleaseNotesTask(ObjectFactory objectFactory, ExecOperations exec releaseNotesTemplate = objectFactory.fileProperty(); releaseHighlightsTemplate = objectFactory.fileProperty(); breakingChangesTemplate = objectFactory.fileProperty(); + breakingChangesAreaTemplate = objectFactory.fileProperty(); releaseNotesIndexFile = objectFactory.fileProperty(); releaseNotesFile = objectFactory.fileProperty(); releaseHighlightsFile = objectFactory.fileProperty(); - breakingChangesFile = objectFactory.fileProperty(); + breakingChangesIndexFile = objectFactory.fileProperty(); + breakingChangesDirectory = objectFactory.directoryProperty(); gitWrapper = 
new GitWrapper(execOperations); } @@ -129,7 +136,9 @@ public void executeTask() throws IOException { LOGGER.info("Generating breaking changes / deprecations notes..."); BreakingChangesGenerator.update( this.breakingChangesTemplate.get().getAsFile(), - this.breakingChangesFile.get().getAsFile(), + this.breakingChangesIndexFile.get().getAsFile(), + this.breakingChangesDirectory.get().getAsFile(), + this.breakingChangesAreaTemplate.get().getAsFile(), entries ); } @@ -143,7 +152,7 @@ public void executeTask() throws IOException { @VisibleForTesting static Set getVersions(GitWrapper gitWrapper, String currentVersion) { QualifiedVersion v = QualifiedVersion.of(currentVersion); - Set versions = gitWrapper.listVersions("v" + v.getMajor() + '.' + v.getMinor() + ".*").collect(toSet()); + Set versions = gitWrapper.listVersions("v" + v.major() + '.' + v.minor() + ".*").collect(toSet()); versions.add(v); return versions; } @@ -174,7 +183,7 @@ static Map> partitionFilesByVersion( QualifiedVersion currentVersion = QualifiedVersion.of(versionString); // Find all tags for this minor series, using a wildcard tag pattern. - String tagWildcard = "v%d.%d*".formatted(currentVersion.getMajor(), currentVersion.getMinor()); + String tagWildcard = "v%d.%d*".formatted(currentVersion.major(), currentVersion.minor()); final List earlierVersions = gitWrapper.listVersions(tagWildcard) // Only keep earlier versions, and if `currentVersion` is a prerelease, then only prereleases too. @@ -339,11 +348,29 @@ public void setReleaseHighlightsFile(RegularFile file) { } @OutputFile - public RegularFileProperty getBreakingChangesFile() { - return breakingChangesFile; + public RegularFileProperty getBreakingChangesIndexFile() { + return breakingChangesIndexFile; } - public void setBreakingChangesFile(RegularFile file) { - this.breakingChangesFile.set(file); + public void setBreakingChangesIndexFile(RegularFile file) { + this.breakingChangesIndexFile.set(file); + } + + public void setBreakingChangesDirectory(Directory breakingChangesDirectory) { + this.breakingChangesDirectory.set(breakingChangesDirectory); + } + + @OutputDirectory + public DirectoryProperty getBreakingChangesDirectory() { + return breakingChangesDirectory; + } + + @InputFile + public RegularFileProperty getBreakingChangesAreaTemplate() { + return breakingChangesAreaTemplate; + } + + public void setBreakingChangesAreaTemplate(RegularFile file) { + this.breakingChangesAreaTemplate.set(file); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTask.java index 45d85ec61cec0..a2a1c58d67cc2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTask.java @@ -162,7 +162,7 @@ private static Set findAllFilesInEarlierVersions(GitWrapper gitWrapper, */ @VisibleForTesting static Stream findPreviousVersion(GitWrapper gitWrapper, QualifiedVersion version) { - final int majorSeries = version.getMinor() == 0 && version.getRevision() == 0 ? version.getMajor() - 1 : version.getMajor(); + final int majorSeries = version.minor() == 0 && version.revision() == 0 ? 
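The breakingChangesDirectory wiring above follows Gradle's managed-property pattern; here is a minimal sketch of that pattern, with a hypothetical task name that is not part of this diff:

import javax.inject.Inject;

import org.gradle.api.DefaultTask;
import org.gradle.api.file.DirectoryProperty;
import org.gradle.api.model.ObjectFactory;
import org.gradle.api.tasks.OutputDirectory;
import org.gradle.api.tasks.TaskAction;

public class ExampleNotesTask extends DefaultTask {
    private final DirectoryProperty outputDir;

    @Inject
    public ExampleNotesTask(ObjectFactory objectFactory) {
        this.outputDir = objectFactory.directoryProperty();
    }

    // Annotating the getter with @OutputDirectory lets Gradle create the directory
    // and track it for up-to-date checks, as with getBreakingChangesDirectory() above.
    @OutputDirectory
    public DirectoryProperty getOutputDir() {
        return outputDir;
    }

    @TaskAction
    public void run() {
        System.out.println("Writing to " + outputDir.get().getAsFile());
    }
}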
version.major() - 1 : version.major(); final String tagPattern = "v" + majorSeries + ".*"; return gitWrapper.listVersions(tagPattern).filter(v -> v.isBefore(version)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java index 0cc579e68d52a..df22f0b804d3c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java @@ -21,26 +21,21 @@ * with how {@link Version} is used in the build. It also retains any qualifier (prerelease) information, and uses * that information when comparing instances. */ -public final class QualifiedVersion implements Comparable { - private final int major; - private final int minor; - private final int revision; - private final Qualifier qualifier; +public record QualifiedVersion( + int major, + int minor, + int revision, + org.elasticsearch.gradle.internal.release.QualifiedVersion.Qualifier qualifier +) implements Comparable { private static final Pattern pattern = Pattern.compile( "^v? (\\d+) \\. (\\d+) \\. (\\d+) (?: - (alpha\\d+ | beta\\d+ | rc\\d+ | SNAPSHOT ) )? $", Pattern.COMMENTS ); - private QualifiedVersion(int major, int minor, int revision, String qualifier) { - this.major = major; - this.minor = minor; - this.revision = revision; - this.qualifier = qualifier == null ? null : Qualifier.of(qualifier); - } - /** * Parses the supplied string into an object. + * * @param s a version string in strict semver * @return a new instance */ @@ -55,7 +50,7 @@ public static QualifiedVersion of(final String s) { Integer.parseInt(matcher.group(1)), Integer.parseInt(matcher.group(2)), Integer.parseInt(matcher.group(3)), - matcher.group(4) + matcher.group(4) == null ? null : Qualifier.of(matcher.group(4)) ); } @@ -64,42 +59,10 @@ public String toString() { return "%d.%d.%d%s".formatted(major, minor, revision, qualifier == null ? 
"" : "-" + qualifier); } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - QualifiedVersion version = (QualifiedVersion) o; - return major == version.major - && minor == version.minor - && revision == version.revision - && Objects.equals(qualifier, version.qualifier); - } - - @Override - public int hashCode() { - return Objects.hash(major, minor, revision, qualifier); - } - - public int getMajor() { - return major; - } - - public int getMinor() { - return minor; - } - - public int getRevision() { - return revision; - } - public boolean hasQualifier() { return qualifier != null; } - public Qualifier getQualifier() { - return qualifier; - } - public boolean isSnapshot() { return this.qualifier != null && this.qualifier.level == QualifierLevel.SNAPSHOT; } @@ -129,22 +92,10 @@ private enum QualifierLevel { SNAPSHOT } - private static class Qualifier implements Comparable { - private final QualifierLevel level; - private final int number; - - private Qualifier(QualifierLevel level, int number) { - this.level = level; - this.number = number; - } + private record Qualifier(QualifierLevel level, int number) implements Comparable { private static final Comparator COMPARATOR = Comparator.comparing((Qualifier p) -> p.level).thenComparing(p -> p.number); - @Override - public int compareTo(Qualifier other) { - return COMPARATOR.compare(this, other); - } - private static Qualifier of(String qualifier) { if ("SNAPSHOT".equals(qualifier)) { return new Qualifier(QualifierLevel.SNAPSHOT, 0); @@ -162,21 +113,14 @@ private static Qualifier of(String qualifier) { } } - public String toString() { - return level == QualifierLevel.SNAPSHOT ? level.name() : level.name() + number; - } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Qualifier that = (Qualifier) o; - return number == that.number && level == that.level; + public int compareTo(Qualifier other) { + return COMPARATOR.compare(this, other); } @Override - public int hashCode() { - return Objects.hash(level, number); + public String toString() { + return level == QualifierLevel.SNAPSHOT ? level.name() : level.name() + number; } } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java index e8e807f301a2c..3d86e0ae9f2b4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java @@ -39,9 +39,9 @@ static void update(File templateFile, File outputFile, List entr static String generateFile(QualifiedVersion version, String template, List entries) throws IOException { final List priorVersions = new ArrayList<>(); - if (version.getMinor() > 0) { - final int major = version.getMajor(); - for (int minor = version.getMinor(); minor >= 0; minor--) { + if (version.minor() > 0) { + final int major = version.major(); + for (int minor = version.minor(); minor >= 0; minor--) { String majorMinor = major + "." 
+ minor; String fileSuffix = ""; if (major == 7 && minor < 7) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java index 839c318f32784..03c01f5d8e24b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java @@ -43,7 +43,7 @@ static String generateFile(Set versionsSet, String template) t versionsSet.stream().map(v -> v.isSnapshot() ? v.withoutQualifier() : v).forEach(versions::add); final List includeVersions = versions.stream() - .map(v -> v.hasQualifier() ? v.toString() : v.getMajor() + "." + v.getMinor()) + .map(v -> v.hasQualifier() ? v.toString() : v.major() + "." + v.minor()) .distinct() .collect(Collectors.toList()); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index 8f08da371ec4b..97b0b46365bda 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -84,11 +84,15 @@ public void apply(Project project) { task.setReleaseHighlightsFile(projectDirectory.file("docs/reference/release-notes/highlights.asciidoc")); task.setBreakingChangesTemplate(projectDirectory.file(RESOURCES + "templates/breaking-changes.asciidoc")); - task.setBreakingChangesFile( + task.setBreakingChangesIndexFile( projectDirectory.file( String.format("docs/reference/migration/migrate_%d_%d.asciidoc", version.getMajor(), version.getMinor()) ) ); + task.setBreakingChangesAreaTemplate(projectDirectory.file(RESOURCES + "templates/breaking-changes-area.asciidoc")); + task.setBreakingChangesDirectory( + projectDirectory.dir(String.format("docs/reference/migration/migrate_%d_%d", version.getMajor(), version.getMinor())) + ); task.dependsOn(validateChangelogsTask); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 16c531113410b..7510e0ff90a8b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -117,7 +117,7 @@ public void apply(Project project) { depsTask.configure(t -> t.dependsOn(examplePlugin.getDependencies())); depsTasks.put(taskname, depsTask); TaskProvider destructiveTask = configureTestTask(project, taskname, distribution, t -> { - t.onlyIf(t2 -> distribution.isDocker() == false || dockerSupport.get().getDockerAvailability().isAvailable); + t.onlyIf(t2 -> distribution.isDocker() == false || dockerSupport.get().getDockerAvailability().isAvailable()); addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java index 65f29fbbf7472..d00301e96fab2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java @@ -32,7 +32,6 @@ import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -184,41 +183,15 @@ public Set getFailedTests() { * use this as the key for our HashMap, it's best to control the implementation as there's no guarantee that Gradle's * various {@link TestDescriptor} implementations reliably implement equals and hashCode. */ - public static class Descriptor { - private final String name; - private final String className; - private final String parent; - - private Descriptor(String name, String className, String parent) { - this.name = name; - this.className = className; - this.parent = parent; - } + public record Descriptor(String name, String className, String parent) { public static Descriptor of(TestDescriptor d) { return new Descriptor(d.getName(), d.getClassName(), d.getParent() == null ? null : d.getParent().toString()); } - public String getClassName() { - return className; - } - public String getFullName() { return className + "." + name; } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Descriptor that = (Descriptor) o; - return Objects.equals(name, that.name) && Objects.equals(className, that.className) && Objects.equals(parent, that.parent); - } - - @Override - public int hashCode() { - return Objects.hash(name, className, parent); - } } private class EventWriter implements Closeable { @@ -226,7 +199,7 @@ private class EventWriter implements Closeable { private final Writer writer; EventWriter(Descriptor descriptor) { - this.outputFile = new File(outputDirectory, descriptor.getClassName() + ".out"); + this.outputFile = new File(outputDirectory, descriptor.className() + ".out"); FileOutputStream fos; try { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java index 7be77a69e8fc5..1d5d0078e771a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java @@ -21,7 +21,6 @@ import org.gradle.api.Project; import org.gradle.api.plugins.JavaBasePlugin; import org.gradle.api.provider.ProviderFactory; -import org.jetbrains.annotations.Nullable; import javax.inject.Inject; @@ -78,7 +77,6 @@ public void apply(Project project) { .configureEach(t -> t.finalizedBy(project.getTasks().withType(FixtureStop.class))); } - @Nullable private String systemProperty(String propName) { return providerFactory.systemProperty(propName).forUseAtConfigurationTime().getOrNull(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/ReplaceByKey.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/ReplaceByKey.java index d4c367fe0b882..6bb0f8f6db0aa 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/ReplaceByKey.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/ReplaceByKey.java @@ -54,7 +54,7 @@ public String getNewChildKey() { @Override public boolean shouldApply(RestTestContext testContext) { - return testName == null || testContext.getTestName().equals(testName); + return testName == null || testContext.testName().equals(testName); } @Input diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestContext.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestContext.java index 9b17a3cd81f38..80b780e656062 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestContext.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestContext.java @@ -11,15 +11,4 @@ /** * A place to stash information about a test that is being transformed. */ -public class RestTestContext { - - private final String testName; - - public RestTestContext(String testName) { - this.testName = testName; - } - - public String getTestName() { - return testName; - } -} +public record RestTestContext(String testName) {} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatch.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatch.java index cfaf795a5780e..40d2cf6d24adc 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatch.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatch.java @@ -37,7 +37,7 @@ public AddMatch(String matchKey, JsonNode matchValue, String testName) { @Override public boolean shouldApply(RestTestContext testContext) { - return testContext.getTestName().equals(testName); + return testContext.testName().equals(testName); } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatch.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatch.java index 853ae5c88979a..10c5152ba5376 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatch.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatch.java @@ -47,7 +47,7 @@ public String requiredChildKey() { @Override public boolean shouldApply(RestTestContext testContext) { - return testName == null || testContext.getTestName().equals(testName); + return testName == null || testContext.testName().equals(testName); } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/skip/Skip.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/skip/Skip.java index c8a7e0eddab83..d8e5f773584db 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/skip/Skip.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/skip/Skip.java @@ -18,7 +18,6 @@ import org.elasticsearch.gradle.internal.test.rest.transform.RestTestTransformByParentObject; import 
org.elasticsearch.gradle.internal.test.rest.transform.RestTestTransformGlobalSetup; import org.gradle.api.tasks.Input; -import org.jetbrains.annotations.Nullable; import java.util.Iterator; @@ -43,7 +42,7 @@ public Skip(String skipReason) { } @Override - public ObjectNode transformSetup(@Nullable ObjectNode setupNodeParent) { + public ObjectNode transformSetup(ObjectNode setupNodeParent) { // only transform the global setup if there is no named test if (testName.isBlank()) { ArrayNode setupNode; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java index c99c01c6d9223..a02da69026028 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java @@ -54,7 +54,7 @@ public String requiredChildKey() { @Override public boolean shouldApply(RestTestContext testContext) { - return testName == null || testContext.getTestName().equals(testName); + return testName == null || testContext.testName().equals(testName); } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java index 8bfbadbe86ad3..f8116d14f2bf5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java @@ -88,7 +88,7 @@ public List getAllowedWarnings() { @Override public boolean shouldApply(RestTestContext testContext) { - return testName == null || testContext.getTestName().equals(testName); + return testName == null || testContext.testName().equals(testName); } @Input diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarnings.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarnings.java index b95881643fbb9..2cff61de0ac12 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarnings.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarnings.java @@ -76,7 +76,7 @@ public String getSkipFeatureName() { @Override public boolean shouldApply(RestTestContext testContext) { - return testName.equals(testContext.getTestName()); + return testName.equals(testContext.testName()); } @Input diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java index a00901a2b1418..13e40f6f00790 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java @@ -82,7 +82,7 @@ public Set getWarnings() { @Override public boolean 
shouldApply(RestTestContext testContext) { - return testName == null || testContext.getTestName().equals(testName); + return testName == null || testContext.testName().equals(testName); } @Input diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index 0112e34315952..128370bb56168 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -108,11 +108,10 @@ public void execute(Task task) { maybeSkipTask(dockerSupport, buildFixture); ComposeExtension composeExtension = project.getExtensions().getByType(ComposeExtension.class); - composeExtension.setUseComposeFiles(Collections.singletonList(DOCKER_COMPOSE_YML)); - composeExtension.setRemoveContainers(true); - composeExtension.setExecutable( - project.file("/usr/local/bin/docker-compose").exists() ? "/usr/local/bin/docker-compose" : "/usr/bin/docker-compose" - ); + composeExtension.getUseComposeFiles().addAll(Collections.singletonList(DOCKER_COMPOSE_YML)); + composeExtension.getRemoveContainers().set(true); + composeExtension.getExecutable() + .set(project.file("/usr/local/bin/docker-compose").exists() ? "/usr/local/bin/docker-compose" : "/usr/bin/docker-compose"); tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions @@ -169,7 +168,7 @@ private void maybeSkipTask(Provider dockerSupport, TaskPro private void maybeSkipTask(Provider dockerSupport, Task task) { task.onlyIf(spec -> { - boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeAvailable; + boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeAvailable(); if (isComposeAvailable == false) { LOGGER.info("Task {} requires docker-compose but it is unavailable. 
Task will be skipped.", task.getPath()); } diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index 7eb80babe3c15..5fb237cd7c964 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -25,30 +25,24 @@ "Authentication", "Authorization", "Autoscaling", + "CAT APIs", "CCR", "CRUD", "Client", "Cluster Coordination", + "Data streams", "Discovery-Plugins", "Distributed", "EQL", "Engine", "FIPS", - "Features/CAT APIs", - "Features/Data streams", - "Features/Features", - "Features/ILM+SLM", - "Features/Indices APIs", - "Features/Ingest", - "Features/Java High Level REST Client", - "Features/Java Low Level REST Client", - "Features/Monitoring", - "Features/Stats", - "Features/Watcher", + "Features", "Geo", "Graph", "Highlighting", + "ILM+SLM", "IdentityProvider", + "Indices APIs", "Infra/CLI", "Infra/Circuit Breakers", "Infra/Core", @@ -60,9 +54,13 @@ "Infra/Scripting", "Infra/Settings", "Infra/Transport API", + "Ingest", + "Java High Level REST Client", + "Java Low Level REST Client", "License", "Machine Learning", "Mapping", + "Monitoring", "Network", "Packaging", "Percolator", @@ -76,11 +74,13 @@ "Search", "Security", "Snapshot/Restore", + "Stats", "Store", "Suggesters", "TLS", "Task Management", - "Transform" + "Transform", + "Watcher" ] }, "type": { @@ -157,6 +157,9 @@ }, "notable": { "type": "boolean" + }, + "ess_setting_change": { + "type": "boolean" } }, "required": [ @@ -179,6 +182,9 @@ "body": { "type": "string", "minLength": 1 + }, + "ess_setting_change": { + "type": "boolean" } }, "required": [ diff --git a/build-tools-internal/src/main/resources/checkstyle.xml b/build-tools-internal/src/main/resources/checkstyle.xml index abaab3a1a8ae0..22a0d2912e4d7 100644 --- a/build-tools-internal/src/main/resources/checkstyle.xml +++ b/build-tools-internal/src/main/resources/checkstyle.xml @@ -107,18 +107,35 @@ [checkstyle rule XML additions lost in extraction] diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java [index line and first hunk header lost in extraction] logger.info("--> testing basic search with sort"); { Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity("{ \"sort\": [{ \"int\" : \"asc\" }]}"); + searchRequest.setJsonEntity(""" + { "sort": [{ "int" : "asc" }]}"""); Map response = entityAsMap(client().performRequest(searchRequest)); assertNoFailures(response); assertTotalHits(count, response); @@ -629,7 +638,8 @@ void assertBasicSearchWorks(int count) throws IOException { logger.info("--> testing exists filter"); { Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity("{ \"query\": { \"exists\" : {\"field\": \"string\"} }}"); + searchRequest.setJsonEntity(""" + { "query": { "exists" : {"field": "string"} }}"""); Map response = entityAsMap(client().performRequest(searchRequest)); assertNoFailures(response); assertTotalHits(count, response); @@ -638,7 +648,8 @@ void assertBasicSearchWorks(int count) throws IOException { logger.info("--> testing field with dots in the name"); { Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity("{ \"query\": { \"exists\" : {\"field\": \"field.with.dots\"} }}"); + searchRequest.setJsonEntity(""" + { "query": { "exists" : {"field": "field.with.dots"} }}"""); Map response = entityAsMap(client().performRequest(searchRequest)); assertNoFailures(response); assertTotalHits(count, response); @@ -675,7 +686,17 @@ void assertAllSearchWorks(int count) throws IOException { void assertBasicAggregationWorks() 
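The string-literal rewrites in these test hunks all follow the same Java 15 text-block pattern; a minimal equivalence sketch, using the JSON from the sort hunk above:

public class TextBlockSketch {
    public static void main(String[] args) {
        String escaped = "{ \"sort\": [{ \"int\" : \"asc\" }]}";
        String textBlock = """
            { "sort": [{ "int" : "asc" }]}""";
        // Putting the closing delimiter on the content line strips the incidental
        // indentation and adds no trailing newline, so the two strings are identical.
        System.out.println(escaped.equals(textBlock)); // true
    }
}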
throws IOException { // histogram on a long Request longHistogramRequest = new Request("GET", "/" + index + "/_search"); - longHistogramRequest.setJsonEntity("{ \"aggs\": { \"histo\" : {\"histogram\" : {\"field\": \"int\", \"interval\": 10}} }}"); + longHistogramRequest.setJsonEntity(""" + { + "aggs": { + "histo": { + "histogram": { + "field": "int", + "interval": 10 + } + } + } + }"""); Map longHistogram = entityAsMap(client().performRequest(longHistogramRequest)); assertNoFailures(longHistogram); List histoBuckets = (List) XContentMapValues.extractValue("aggregations.histo.buckets", longHistogram); @@ -688,7 +709,16 @@ void assertBasicAggregationWorks() throws IOException { // terms on a boolean Request boolTermsRequest = new Request("GET", "/" + index + "/_search"); - boolTermsRequest.setJsonEntity("{ \"aggs\": { \"bool_terms\" : {\"terms\" : {\"field\": \"bool\"}} }}"); + boolTermsRequest.setJsonEntity(""" + { + "aggs": { + "bool_terms": { + "terms": { + "field": "bool" + } + } + } + }"""); Map boolTerms = entityAsMap(client().performRequest(boolTermsRequest)); List termsBuckets = (List) XContentMapValues.extractValue("aggregations.bool_terms.buckets", boolTerms); int termsCount = 0; @@ -701,17 +731,20 @@ void assertBasicAggregationWorks() throws IOException { void assertRealtimeGetWorks() throws IOException { Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - disableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : -1 }}"); + disableAutoRefresh.setJsonEntity(""" + { "index": { "refresh_interval" : -1 }}"""); client().performRequest(disableAutoRefresh); Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); + searchRequest.setJsonEntity(""" + { "query": { "match_all" : {} }}"""); Map searchResponse = entityAsMap(client().performRequest(searchRequest)); Map hit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); - updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}"); + updateRequest.setJsonEntity(""" + { "doc" : { "foo": "bar"}}"""); client().performRequest(updateRequest); Request getRequest = new Request("GET", "/" + index + "/_doc/" + docId); @@ -721,13 +754,21 @@ void assertRealtimeGetWorks() throws IOException { assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); Request enableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - enableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : \"1s\" }}"); + enableAutoRefresh.setJsonEntity(""" + { "index": { "refresh_interval" : "1s" }}"""); client().performRequest(enableAutoRefresh); } void assertStoredBinaryFields(int count) throws Exception { Request request = new Request("GET", "/" + index + "/_search"); - request.setJsonEntity("{ \"query\": { \"match_all\" : {} }, \"size\": 100, \"stored_fields\": \"binary\"}"); + request.setJsonEntity(""" + { + "query": { + "match_all": {} + }, + "size": 100, + "stored_fields": "binary" + }"""); Map rsp = entityAsMap(client().performRequest(request)); assertTotalHits(count, rsp); @@ -782,7 +823,7 @@ public void testSingleDoc() throws IOException { * Tests that a single empty shard index is correctly recovered. Empty shards are often an edge case. 
*/ public void testEmptyShard() throws IOException { - final String index = "test_empty_shard"; + final String indexName = "test_empty_shard"; if (isRunningAgainstOldCluster()) { Settings.Builder settings = Settings.builder() @@ -794,15 +835,16 @@ public void testEmptyShard() throws IOException { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - createIndex(index, settings.build()); + createIndex(indexName, settings.build()); } - ensureGreen(index); + ensureGreen(indexName); } /** * Tests recovery of an index with or without a translog and the * statistics we gather about that. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/52031") public void testRecovery() throws Exception { int count; boolean shouldHaveTranslog; @@ -881,6 +923,7 @@ public void testRecovery() throws Exception { String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); + String minCompatibleBWCVersion = Version.CURRENT.minimumCompatibilityVersion().luceneVersion.toString(); if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { int numCurrentVersion = 0; int numBwcVersion = 0; @@ -899,6 +942,10 @@ public void testRecovery() throws Exception { numCurrentVersion++; } else if (bwcLuceneVersion.equals(version)) { numBwcVersion++; + } else if (minCompatibleBWCVersion.equals(version) && minCompatibleBWCVersion.equals(bwcLuceneVersion) == false) { + // Our upgrade path from 7.non-last always goes through 7.last, which depending on timing can create 7.last + // index segment. We ignore those. + continue; } else { fail("expected version to be one of [" + currentLuceneVersion + "," + bwcLuceneVersion + "] but was " + line); } @@ -948,9 +995,9 @@ public void testSnapshotRestore() throws IOException { // Stick a routing attribute into to cluster settings so we can see it after the restore Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); - addRoutingSettings.setJsonEntity( - "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + getOldClusterVersion() + "\"}}" - ); + addRoutingSettings.setJsonEntity(""" + {"persistent": {"cluster.routing.allocation.exclude.test_attr": "%s"}} + """.formatted(getOldClusterVersion())); client().performRequest(addRoutingSettings); // Stick a template into the cluster so we can see it after the restore @@ -1165,21 +1212,24 @@ public void testClosedIndices() throws Exception { * that the index has started shards. */ @SuppressWarnings("unchecked") - private void assertClosedIndex(final String index, final boolean checkRoutingTable) throws IOException { + private void assertClosedIndex(final String indexName, final boolean checkRoutingTable) throws IOException { final Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); - final Map metadata = (Map) XContentMapValues.extractValue("metadata.indices." + index, state); + final Map metadata = (Map) XContentMapValues.extractValue("metadata.indices." + indexName, state); assertThat(metadata, notNullValue()); assertThat(metadata.get("state"), equalTo("close")); - final Map blocks = (Map) XContentMapValues.extractValue("blocks.indices." + index, state); + final Map blocks = (Map) XContentMapValues.extractValue("blocks.indices." 
+ indexName, state); assertThat(blocks, notNullValue()); assertThat(blocks.containsKey(String.valueOf(MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID)), is(true)); final Map settings = (Map) XContentMapValues.extractValue("settings", metadata); assertThat(settings, notNullValue()); - final Map routingTable = (Map) XContentMapValues.extractValue("routing_table.indices." + index, state); + final Map routingTable = (Map) XContentMapValues.extractValue( + "routing_table.indices." + indexName, + state + ); if (checkRoutingTable) { assertThat(routingTable, notNullValue()); assertThat(Booleans.parseBoolean((String) XContentMapValues.extractValue("index.verified_before_close", settings)), is(true)); @@ -1198,7 +1248,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab for (Map shard : shards) { assertThat(XContentMapValues.extractValue("shard", shard), equalTo(i)); assertThat(XContentMapValues.extractValue("state", shard), equalTo("STARTED")); - assertThat(XContentMapValues.extractValue("index", shard), equalTo(index)); + assertThat(XContentMapValues.extractValue("index", shard), equalTo(indexName)); } } } else { @@ -1219,7 +1269,8 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver // Remove the routing setting and template so we can test restoring them. Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); - clearRoutingFromSettings.setJsonEntity("{\"persistent\":{\"cluster.routing.allocation.exclude.test_attr\": null}}"); + clearRoutingFromSettings.setJsonEntity(""" + {"persistent":{"cluster.routing.allocation.exclude.test_attr": null}}"""); client().performRequest(clearRoutingFromSettings); client().performRequest(new Request("DELETE", "/_template/test_template")); @@ -1245,8 +1296,10 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver int extras = between(1, 100); StringBuilder bulk = new StringBuilder(); for (int i = 0; i < extras; i++) { - bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n"); - bulk.append("{\"test\":\"test\"}\n"); + bulk.append(""" + {"index":{"_id":"%s"}} + {"test":"test"} + """.formatted(count + i)); } Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/_bulk"); @@ -1353,12 +1406,12 @@ private String loadInfoDocument(String id) throws IOException { return m.group(1); } - private List dataNodes(String index, RestClient client) throws IOException { - Request request = new Request("GET", index + "/_stats"); + private List dataNodes(String indexName, RestClient client) throws IOException { + Request request = new Request("GET", indexName + "/_stats"); request.addParameter("level", "shards"); Response response = client.performRequest(request); List nodes = new ArrayList<>(); - List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + indexName + ".shards.0"); for (Object shard : shardStats) { final String nodeId = ObjectPath.evaluate(shard, "routing.node"); nodes.add(nodeId); @@ -1370,8 +1423,8 @@ private List dataNodes(String index, RestClient client) throws IOExcepti * Wait for an index to have green health, waiting longer than * {@link ESRestTestCase#ensureGreen}. 
*/ - protected void ensureGreenLongWait(String index) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + index); + protected void ensureGreenLongWait(String indexName) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + indexName); request.addParameter("timeout", "2m"); request.addParameter("wait_for_status", "green"); request.addParameter("wait_for_no_relocating_shards", "true"); @@ -1538,6 +1591,7 @@ public void testResize() throws Exception { } @SuppressWarnings("unchecked") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/81409") public void testSystemIndexMetadataIsUpgraded() throws Exception { final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct " + "access to system indices will be prevented by default"; @@ -1549,21 +1603,23 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}}\n" + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); + bulk.setJsonEntity(""" + {"index": {"_index": "test_index_old"}} + {"f1": "v1", "f2": "v2"} + """); client().performRequest(bulk); // start a async reindex job Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( - "{\n" - + " \"source\":{\n" - + " \"index\":\"test_index_old\"\n" - + " },\n" - + " \"dest\":{\n" - + " \"index\":\"test_index_reindex\"\n" - + " }\n" - + "}" - ); + reindex.setJsonEntity(""" + { + "source":{ + "index":"test_index_old" + }, + "dest":{ + "index":"test_index_reindex" + } + }"""); reindex.addParameter("wait_for_completion", "false"); Map response = entityAsMap(client().performRequest(reindex)); String taskId = (String) response.get("task"); @@ -1598,14 +1654,13 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity( - "{\n" - + " \"actions\": [\n" - + " {\"add\": {\"index\": \".tasks\", \"alias\": \"test-system-alias\"}},\n" - + " {\"add\": {\"index\": \"test_index_reindex\", \"alias\": \"test-system-alias\"}}\n" - + " ]\n" - + "}" - ); + putAliasRequest.setJsonEntity(""" + { + "actions": [ + {"add": {"index": ".tasks", "alias": "test-system-alias"}}, + {"add": {"index": "test_index_reindex", "alias": "test-system-alias"}} + ] + }"""); putAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 2235adeffdf63..dbbd55f7725a7 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -62,51 +62,57 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { private static final List CANDIDATES = new ArrayList<>(); static { - addCandidate("\"match\": { \"keyword_field\": \"value\"}", new MatchQueryBuilder("keyword_field", "value")); + addCandidate(""" + "match": { "text_field": "value"} + """, new MatchQueryBuilder("text_field", "value")); + 
addCandidate(""" + "match": { "text_field": {"query": "value", "operator": "and"} } + """, new MatchQueryBuilder("text_field", "value").operator(Operator.AND)); + addCandidate(""" + "match": { "text_field": {"query": "value", "analyzer": "english"} } + """, new MatchQueryBuilder("text_field", "value").analyzer("english")); + addCandidate(""" + "match": { "text_field": {"query": "value", "minimum_should_match": 3} } + """, new MatchQueryBuilder("text_field", "value").minimumShouldMatch("3")); + addCandidate(""" + "match": { "text_field": {"query": "value", "fuzziness": "auto"} } + """, new MatchQueryBuilder("text_field", "value").fuzziness(Fuzziness.AUTO)); + addCandidate(""" + "match_phrase": { "text_field": "value"} + """, new MatchPhraseQueryBuilder("text_field", "value")); + addCandidate(""" + "match_phrase": { "text_field": {"query": "value", "slop": 3}} + """, new MatchPhraseQueryBuilder("text_field", "value").slop(3)); + addCandidate(""" + "range": { "long_field": {"gte": 1, "lte": 9}} + """, new RangeQueryBuilder("long_field").from(1).to(9)); addCandidate( - "\"match\": { \"keyword_field\": {\"query\": \"value\", \"operator\": \"and\"} }", - new MatchQueryBuilder("keyword_field", "value").operator(Operator.AND) - ); - addCandidate( - "\"match\": { \"keyword_field\": {\"query\": \"value\", \"analyzer\": \"english\"} }", - new MatchQueryBuilder("keyword_field", "value").analyzer("english") - ); - addCandidate( - "\"match\": { \"keyword_field\": {\"query\": \"value\", \"minimum_should_match\": 3} }", - new MatchQueryBuilder("keyword_field", "value").minimumShouldMatch("3") - ); - addCandidate( - "\"match\": { \"keyword_field\": {\"query\": \"value\", \"fuzziness\": \"auto\"} }", - new MatchQueryBuilder("keyword_field", "value").fuzziness(Fuzziness.AUTO) - ); - addCandidate("\"match_phrase\": { \"keyword_field\": \"value\"}", new MatchPhraseQueryBuilder("keyword_field", "value")); - addCandidate( - "\"match_phrase\": { \"keyword_field\": {\"query\": \"value\", \"slop\": 3}}", - new MatchPhraseQueryBuilder("keyword_field", "value").slop(3) - ); - addCandidate("\"range\": { \"long_field\": {\"gte\": 1, \"lte\": 9}}", new RangeQueryBuilder("long_field").from(1).to(9)); - addCandidate( - "\"bool\": { \"must_not\": [{\"match_all\": {}}], \"must\": [{\"match_all\": {}}], " - + "\"filter\": [{\"match_all\": {}}], \"should\": [{\"match_all\": {}}]}", + """ + "bool": { "must_not": [{"match_all": {}}], "must": [{"match_all": {}}], "filter": [{"match_all": {}}], \ + "should": [{"match_all": {}}]} + """, new BoolQueryBuilder().mustNot(new MatchAllQueryBuilder()) .must(new MatchAllQueryBuilder()) .filter(new MatchAllQueryBuilder()) .should(new MatchAllQueryBuilder()) ); addCandidate( - "\"dis_max\": {\"queries\": [{\"match_all\": {}},{\"match_all\": {}},{\"match_all\": {}}], \"tie_breaker\": 0.01}", + """ + "dis_max": {"queries": [{"match_all": {}},{"match_all": {}},{"match_all": {}}], "tie_breaker": 0.01} + """, new DisMaxQueryBuilder().add(new MatchAllQueryBuilder()) .add(new MatchAllQueryBuilder()) .add(new MatchAllQueryBuilder()) .tieBreaker(0.01f) ); + addCandidate(""" + "constant_score": {"filter": {"match_all": {}}, "boost": 0.1} + """, new ConstantScoreQueryBuilder(new MatchAllQueryBuilder()).boost(0.1f)); addCandidate( - "\"constant_score\": {\"filter\": {\"match_all\": {}}, \"boost\": 0.1}", - new ConstantScoreQueryBuilder(new MatchAllQueryBuilder()).boost(0.1f) - ); - addCandidate( - "\"function_score\": {\"query\": {\"match_all\": {}}," - + "\"functions\": [{\"random_score\": {}, 
\"filter\": {\"match_all\": {}}, \"weight\": 0.2}]}", + """ + "function_score": {"query": {"match_all": {}},"functions": [{"random_score": {}, "filter": {"match_all": {}}, \ + "weight": 0.2}]} + """, new FunctionScoreQueryBuilder( new MatchAllQueryBuilder(), new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { @@ -117,22 +123,28 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { ) ); addCandidate( - "\"span_near\": {\"clauses\": [{ \"span_term\": { \"keyword_field\": \"value1\" }}, " - + "{ \"span_term\": { \"keyword_field\": \"value2\" }}]}", + """ + "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ + { "span_term": { "keyword_field": "value2" }}]} + """, new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 0).addClause( new SpanTermQueryBuilder("keyword_field", "value2") ) ); addCandidate( - "\"span_near\": {\"clauses\": [{ \"span_term\": { \"keyword_field\": \"value1\" }}, " - + "{ \"span_term\": { \"keyword_field\": \"value2\" }}], \"slop\": 2}", + """ + "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ + { "span_term": { "keyword_field": "value2" }}], "slop": 2} + """, new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 2).addClause( new SpanTermQueryBuilder("keyword_field", "value2") ) ); addCandidate( - "\"span_near\": {\"clauses\": [{ \"span_term\": { \"keyword_field\": \"value1\" }}, " - + "{ \"span_term\": { \"keyword_field\": \"value2\" }}], \"slop\": 2, \"in_order\": false}", + """ + "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ + { "span_term": { "keyword_field": "value2" }}], "slop": 2, "in_order": false} + """, new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 2).addClause( new SpanTermQueryBuilder("keyword_field", "value2") ).inOrder(false) @@ -167,6 +179,11 @@ public void testQueryBuilderBWC() throws Exception { mappingsAndSettings.field("type", "keyword"); mappingsAndSettings.endObject(); } + { + mappingsAndSettings.startObject("text_field"); + mappingsAndSettings.field("type", "text"); + mappingsAndSettings.endObject(); + } { mappingsAndSettings.startObject("long_field"); mappingsAndSettings.field("type", "long"); @@ -195,12 +212,9 @@ public void testQueryBuilderBWC() throws Exception { for (int i = 0; i < CANDIDATES.size(); i++) { QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; Request request = new Request("GET", "/" + index + "/_search"); - request.setJsonEntity( - "{\"query\": {\"ids\": {\"values\": [\"" - + Integer.toString(i) - + "\"]}}, " - + "\"docvalue_fields\": [{\"field\":\"query.query_builder_field\"}]}" - ); + request.setJsonEntity(""" + {"query": {"ids": {"values": ["%s"]}}, "docvalue_fields": [{"field":"query.query_builder_field"}]} + """.formatted(i)); Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); Map hitRsp = (Map) ((List) ((Map) toMap(rsp).get("hits")).get("hits")).get(0); diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java index e664de150b449..d7af475e15dc7 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java @@ -11,6 +11,8 @@ import org.hamcrest.Matchers; import org.junit.BeforeClass; +import 
java.util.Locale; + public class ESJsonLayoutTests extends ESTestCase { @BeforeClass public static void initNodeName() { @@ -25,23 +27,17 @@ public void testLayout() { ESJsonLayout server = ESJsonLayout.newBuilder().setType("server").build(); String conversionPattern = server.getPatternLayout().getConversionPattern(); - assertThat( - conversionPattern, - Matchers.equalTo( - "{" - + "\"type\": \"server\", " - + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " - + "\"level\": \"%p\", " - + "\"component\": \"%c{1.}\", " - + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " - + "\"node.name\": \"%node_name\", " - + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\"" - + "%notEmpty{, %node_and_cluster_id }" - + "%notEmpty{, %CustomMapFields }" - + "%exceptionAsJson }" - + System.lineSeparator() - ) - ); + assertThat(conversionPattern, Matchers.equalTo(String.format(Locale.ROOT, """ + {\ + "type": "server", \ + "timestamp": "%%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}", \ + "level": "%%p", \ + "component": "%%c{1.}", \ + "cluster.name": "${sys:es.logs.cluster_name}", \ + "node.name": "%%node_name", \ + "message": "%%notEmpty{%%enc{%%marker}{JSON} }%%enc{%%.-10000m}{JSON}"%%notEmpty{, \ + %%node_and_cluster_id }%%notEmpty{, %%CustomMapFields }%%exceptionAsJson \ + }%n"""))); } public void testLayoutWithAdditionalFieldOverride() { @@ -49,21 +45,14 @@ public void testLayoutWithAdditionalFieldOverride() { String conversionPattern = server.getPatternLayout().getConversionPattern(); // message field is removed as is expected to be provided by a field from a message - assertThat( - conversionPattern, - Matchers.equalTo( - "{" - + "\"type\": \"server\", " - + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " - + "\"level\": \"%p\", " - + "\"component\": \"%c{1.}\", " - + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " - + "\"node.name\": \"%node_name\"" - + "%notEmpty{, %node_and_cluster_id }" - + "%notEmpty{, %CustomMapFields }" - + "%exceptionAsJson }" - + System.lineSeparator() - ) - ); + assertThat(conversionPattern, Matchers.equalTo(String.format(Locale.ROOT, """ + {\ + "type": "server", \ + "timestamp": "%%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}", \ + "level": "%%p", \ + "component": "%%c{1.}", \ + "cluster.name": "${sys:es.logs.cluster_name}", \ + "node.name": "%%node_name"%%notEmpty{, %%node_and_cluster_id }%%notEmpty{, %%CustomMapFields }%%exceptionAsJson \ + }%n"""))); } } diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 28680aa82e393..d9edc333c7910 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -147,8 +147,9 @@ public void testDeprecatedMessageWithoutXOpaqueId() throws IOException { public void testCompatibleLog() throws Exception { withThreadContext(threadContext -> { - threadContext.putHeader(Task.X_OPAQUE_ID, "someId"); + threadContext.putHeader(Task.X_OPAQUE_ID_HTTP_HEADER, "someId"); threadContext.putHeader(Task.TRACE_ID, "someTraceId"); + threadContext.putHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, "kibana"); final DeprecationLogger testLogger = DeprecationLogger.getLogger("org.elasticsearch.test"); testLogger.critical(DeprecationCategory.OTHER, "someKey", "deprecated message1") .compatibleCritical("compatibleKey", "compatible API message"); @@ -178,6 +179,7 @@ public void 
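The ESJsonLayout expectations above now pass a text block through String.format, which changes the escaping rules: every literal '%' that Log4j should see (conversion specifiers like %p, %c or %node_name) has to be doubled, %n replaces the previous System.lineSeparator() concatenation, and Locale.ROOT keeps the formatting locale-independent. A stray single '%' would now fail at format time. A sketch with two of the specifiers from the pattern above:

    import java.util.Locale;

    public class PercentEscapeSketch {
        public static void main(String[] args) {
            String expected = String.format(Locale.ROOT, """
                "level": "%%p", "node.name": "%%node_name"%n""");
            // %% becomes a literal %, %n becomes the platform line separator.
            System.out.print(expected); // "level": "%p", "node.name": "%node_name"
        }
    }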
testCompatibleLog() throws Exception { hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "someKey"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), hasEntry(Task.TRACE_ID, "someTraceId"), + hasEntry(DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME, "kibana"), hasEntry("elasticsearch.event.category", "other") ), allOf( @@ -195,6 +197,7 @@ public void testCompatibleLog() throws Exception { hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "compatibleKey"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), hasEntry(Task.TRACE_ID, "someTraceId"), + hasEntry(DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME, "kibana"), hasEntry("elasticsearch.event.category", "compatible_api") ) ) @@ -207,8 +210,9 @@ public void testCompatibleLog() throws Exception { public void testParseFieldEmittingDeprecatedLogs() throws Exception { withThreadContext(threadContext -> { - threadContext.putHeader(Task.X_OPAQUE_ID, "someId"); + threadContext.putHeader(Task.X_OPAQUE_ID_HTTP_HEADER, "someId"); threadContext.putHeader(Task.TRACE_ID, "someTraceId"); + threadContext.putHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, "kibana"); ParseField deprecatedField = new ParseField("new_name", "deprecated_name"); assertTrue(deprecatedField.match("deprecated_name", LoggingDeprecationHandler.INSTANCE)); @@ -247,6 +251,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "deprecated_field_deprecated_name"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), hasEntry(Task.TRACE_ID, "someTraceId"), + hasEntry(DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME, "kibana"), hasEntry("elasticsearch.event.category", "api") ), // deprecation log for field deprecated_name2 (note it is not being throttled) @@ -264,6 +269,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "deprecated_field_deprecated_name2"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), hasEntry(Task.TRACE_ID, "someTraceId"), + hasEntry(DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME, "kibana"), hasEntry("elasticsearch.event.category", "api") ), // compatible log line @@ -281,6 +287,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "deprecated_field_compatible_deprecated_name"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), hasEntry(Task.TRACE_ID, "someTraceId"), + hasEntry(DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME, "kibana"), hasEntry("elasticsearch.event.category", "compatible_api") ) ) @@ -301,8 +308,9 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { public void testDeprecatedMessage() throws Exception { withThreadContext(threadContext -> { - threadContext.putHeader(Task.X_OPAQUE_ID, "someId"); + threadContext.putHeader(Task.X_OPAQUE_ID_HTTP_HEADER, "someId"); threadContext.putHeader(Task.TRACE_ID, "someTraceId"); + threadContext.putHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, "kibana"); final DeprecationLogger testLogger = DeprecationLogger.getLogger("org.elasticsearch.test"); testLogger.warn(DeprecationCategory.OTHER, "someKey", "deprecated message1"); @@ -330,6 +338,8 @@ public void testDeprecatedMessage() throws Exception { hasKey("ecs.version"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "someKey"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), + hasEntry(Task.TRACE_ID, "someTraceId"), + hasEntry(DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME, "kibana"), 
hasEntry("elasticsearch.event.category", "other") ) ) @@ -552,7 +562,7 @@ public void testDuplicateLogMessages() throws Exception { // For the same key and X-Opaque-ID deprecation should be once withThreadContext(threadContext -> { - threadContext.putHeader(Task.X_OPAQUE_ID, "ID1"); + threadContext.putHeader(Task.X_OPAQUE_ID_HTTP_HEADER, "ID1"); deprecationLogger.critical(DeprecationCategory.OTHER, "key", "message1"); deprecationLogger.critical(DeprecationCategory.OTHER, "key", "message2"); assertCriticalWarnings("message1", "message2"); @@ -585,7 +595,7 @@ public void testDuplicateLogMessages() throws Exception { // For the same key and different X-Opaque-ID should be multiple times per key/x-opaque-id // continuing with message1-ID1 in logs already, adding a new deprecation log line with message2-ID2 withThreadContext(threadContext -> { - threadContext.putHeader(Task.X_OPAQUE_ID, "ID2"); + threadContext.putHeader(Task.X_OPAQUE_ID_HTTP_HEADER, "ID2"); deprecationLogger.critical(DeprecationCategory.OTHER, "key", "message1"); deprecationLogger.critical(DeprecationCategory.OTHER, "key", "message2"); assertCriticalWarnings("message1", "message2"); diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 19ac2adfaf985..7f9d1e57adeb7 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -21,7 +21,7 @@ restResources { } } -BuildParams.bwcVersions.withWireCompatiple { bwcVersion, baseName -> +BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> if (bwcVersion != VersionProperties.getElasticsearchVersion()) { /* This project runs the core REST tests against a 4 node cluster where two of diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java index b2db1953240ec..9c931e15eeee3 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java @@ -23,7 +23,7 @@ public void testHotThreads() throws Exception { assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); assumeTrue( "new nodes are higher version than BWC nodes", - nodes.getNewNodes().get(0).getVersion().compareTo(nodes.getBWCNodes().get(0).getVersion()) > 0 + nodes.getNewNodes().get(0).version().compareTo(nodes.getBWCNodes().get(0).version()) > 0 ); final Request request = new Request("GET", "/_nodes/hot_threads"); final Response response = client().performRequest(request); diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 9878e880724a2..9793809e1b6f8 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -79,7 +79,7 @@ public void testIndexVersionPropagation() throws Exception { Nodes nodes = buildNodeAndVersions(); assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); logger.info("cluster discovered: {}", nodes.toString()); - final List bwcNamesList = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.toList()); + final List bwcNamesList = nodes.getBWCNodes().stream().map(Node::nodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); Settings.Builder settings = Settings.builder() 
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -92,7 +92,7 @@ public void testIndexVersionPropagation() throws Exception { try ( RestClient newNodeClient = buildClient( restClientSettings(), - nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new) + nodes.getNewNodes().stream().map(Node::publishAddress).toArray(HttpHost[]::new) ) ) { @@ -104,11 +104,11 @@ public void testIndexVersionPropagation() throws Exception { ensureGreen(index); assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); List shards = buildShards(index, nodes, newNodeClient); - Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); - logger.info("primary resolved to: " + primary.getNode().getNodeName()); + Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::primary).findFirst().get(); + logger.info("primary resolved to: " + primary.node().nodeName()); for (Shard shard : shards) { - assertVersion(index, 1, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc1); - assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 1); + assertVersion(index, 1, "_only_nodes:" + shard.node().nodeName(), finalVersionForDoc1); + assertCount(index, "_only_nodes:" + shard.node().nodeName(), 1); } nUpdates = randomIntBetween(minUpdates, maxUpdates); @@ -116,16 +116,16 @@ public void testIndexVersionPropagation() throws Exception { final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); - primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); - logger.info("primary resolved to: " + primary.getNode().getNodeName()); + primary = shards.stream().filter(Shard::primary).findFirst().get(); + logger.info("primary resolved to: " + primary.node().nodeName()); for (Shard shard : shards) { - assertVersion(index, 2, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc2); - assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 2); + assertVersion(index, 2, "_only_nodes:" + shard.node().nodeName(), finalVersionForDoc2); + assertCount(index, "_only_nodes:" + shard.node().nodeName(), 2); } - primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); - logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); - updateIndexSettings(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); + primary = shards.stream().filter(Shard::primary).findFirst().get(); + logger.info("moving primary to new node by excluding {}", primary.node().nodeName()); + updateIndexSettings(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.node().nodeName())); ensureGreen(index); nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates); @@ -133,8 +133,8 @@ public void testIndexVersionPropagation() throws Exception { assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { - assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); - assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 3); + assertVersion(index, 3, "_only_nodes:" + shard.node().nodeName(), 
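These renames follow mechanically from the record conversion at the end of this file: record accessors carry no get/is prefix, so Shard::isPrimary becomes Shard::primary and shard.getNode().getNodeName() becomes shard.node().nodeName(). A compact sketch of the primary-lookup pattern, with a String field standing in for the real Node:

    import java.util.List;
    import java.util.Optional;

    public class PrimaryLookupSketch {
        record Shard(String nodeName, boolean primary) {}

        public static void main(String[] args) {
            List<Shard> shards = List.of(new Shard("node-0", false), new Shard("node-1", true));
            // The boolean accessor doubles as the predicate for the stream.
            Optional<Shard> primary = shards.stream().filter(Shard::primary).findFirst();
            System.out.println(primary.get().nodeName()); // node-1
        }
    }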
finalVersionForDoc3); + assertCount(index, "_only_nodes:" + shard.node().nodeName(), 3); } logger.info("setting number of replicas to 0"); @@ -146,8 +146,8 @@ public void testIndexVersionPropagation() throws Exception { assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { - assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); - assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 4); + assertVersion(index, 4, "_only_nodes:" + shard.node().nodeName(), finalVersionForDoc4); + assertCount(index, "_only_nodes:" + shard.node().nodeName(), 4); } logger.info("setting number of replicas to 1"); @@ -159,8 +159,8 @@ public void testIndexVersionPropagation() throws Exception { assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { - assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); - assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 5); + assertVersion(index, 5, "_only_nodes:" + shard.node().nodeName(), finalVersionForDoc5); + assertCount(index, "_only_nodes:" + shard.node().nodeName(), 5); } } } @@ -169,7 +169,7 @@ public void testSeqNoCheckpoints() throws Exception { Nodes nodes = buildNodeAndVersions(); assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); logger.info("cluster discovered: {}", nodes.toString()); - final List bwcNamesList = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.toList()); + final List bwcNamesList = nodes.getBWCNodes().stream().map(Node::nodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -181,7 +181,7 @@ public void testSeqNoCheckpoints() throws Exception { try ( RestClient newNodeClient = buildClient( restClientSettings(), - nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new) + nodes.getNewNodes().stream().map(Node::publishAddress).toArray(HttpHost[]::new) ) ) { int numDocs = 0; @@ -200,9 +200,9 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("indexing [{}] docs after allowing shards on all nodes", numberOfDocsAfterAllowingShardsOnAllNodes); numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes); assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? 
numDocs : 0, newNodeClient); - Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); - logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); - updateIndexSettings(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); + Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::primary).findFirst().get(); + logger.info("moving primary to new node by excluding {}", primary.node().nodeName()); + updateIndexSettings(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.node().nodeName())); ensureGreen(index); int numDocsOnNewPrimary = 0; final int numberOfDocsAfterMovingPrimary = 1 + randomInt(5); @@ -254,7 +254,7 @@ public void testUpdateSnapshotStatus() throws Exception { assertOK(client().performRequest(request)); - String bwcNames = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + String bwcNames = nodes.getBWCNodes().stream().map(Node::nodeName).collect(Collectors.joining(",")); // Allocating shards on the BWC nodes to makes sure that taking snapshot happens on those nodes. Settings.Builder settings = Settings.builder() @@ -283,14 +283,13 @@ public void testUpdateSnapshotStatus() throws Exception { request.setJsonEntity("{\"indices\": \"" + index + "\"}"); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79930") public void testSyncedFlushTransition() throws Exception { Nodes nodes = buildNodeAndVersions(); - assertTrue("bwc version is on 7.x", nodes.getBWCVersion().before(Version.V_8_0_0)); + assumeTrue("bwc version is on 7.x", nodes.getBWCVersion().before(Version.V_8_0_0)); assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); // Allocate shards to new nodes then verify synced flush requests processed by old nodes/new nodes - String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + String newNodes = nodes.getNewNodes().stream().map(Node::nodeName).collect(Collectors.joining(",")); int numShards = randomIntBetween(1, 10); int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int totalShards = numShards * (numOfReplicas + 1); @@ -308,7 +307,7 @@ public void testSyncedFlushTransition() throws Exception { try ( RestClient oldNodeClient = buildClient( restClientSettings(), - nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new) + nodes.getBWCNodes().stream().map(Node::publishAddress).toArray(HttpHost[]::new) ) ) { Request request = new Request("POST", index + "/_flush/synced"); @@ -336,7 +335,7 @@ public void testSyncedFlushTransition() throws Exception { try ( RestClient newNodeClient = buildClient( restClientSettings(), - nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new) + nodes.getNewNodes().stream().map(Node::publishAddress).toArray(HttpHost[]::new) ) ) { Request request = new Request("POST", index + "/_flush/synced"); @@ -369,7 +368,7 @@ public void testFlushTransition() throws Exception { assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); // Allocate shards to new nodes then verify flush requests processed by old nodes/new nodes - String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + String 
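Alongside the removed @AwaitsFix, the hard assertTrue on the BWC version becomes assumeTrue: when the cluster under test is not a 7.x mix, the test is now reported as skipped rather than failed. A sketch of the difference, assuming plain JUnit 4 (the Elasticsearch test framework inherits the same semantics):

    import org.junit.Test;

    import static org.junit.Assume.assumeTrue;

    public class AssumeSketch {
        @Test
        public void runsOnlyAgainstSevenX() {
            boolean bwcIsPreEight = false; // stand-in for getBWCVersion().before(V_8_0_0)
            // A failed assumption marks the test skipped, not failed.
            assumeTrue("bwc version is on 7.x", bwcIsPreEight);
            // ... synced-flush assertions would run only past this point ...
        }
    }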
newNodes = nodes.getNewNodes().stream().map(Node::nodeName).collect(Collectors.joining(",")); int numShards = randomIntBetween(1, 10); int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int totalShards = numShards * (numOfReplicas + 1); @@ -387,7 +386,7 @@ public void testFlushTransition() throws Exception { try ( RestClient oldNodeClient = buildClient( restClientSettings(), - nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new) + nodes.getBWCNodes().stream().map(Node::publishAddress).toArray(HttpHost[]::new) ) ) { Request request = new Request("POST", index + "/_flush"); @@ -404,7 +403,7 @@ public void testFlushTransition() throws Exception { try ( RestClient newNodeClient = buildClient( restClientSettings(), - nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new) + nodes.getNewNodes().stream().map(Node::publishAddress).toArray(HttpHost[]::new) ) ) { Request request = new Request("POST", index + "/_flush"); @@ -442,22 +441,22 @@ private void assertSeqNoOnShards(String index, Nodes nodes, int numDocs, RestCli assertBusy(() -> { try { List shards = buildShards(index, nodes, client); - Shard primaryShard = shards.stream().filter(Shard::isPrimary).findFirst().get(); + Shard primaryShard = shards.stream().filter(Shard::primary).findFirst().get(); assertNotNull("failed to find primary shard", primaryShard); final long expectedGlobalCkp = numDocs - 1; final long expectMaxSeqNo = numDocs - 1; - logger.info("primary resolved to node {}", primaryShard.getNode()); + logger.info("primary resolved to node {}", primaryShard.node()); for (Shard shard : shards) { - final SeqNoStats seqNoStats = shard.getSeqNoStats(); - logger.info("stats for {}, primary [{}]: [{}]", shard.getNode(), shard.isPrimary(), seqNoStats); - assertThat("max_seq no on " + shard.getNode() + " is wrong", seqNoStats.getMaxSeqNo(), equalTo(expectMaxSeqNo)); + final SeqNoStats seqNoStats = shard.seqNoStats(); + logger.info("stats for {}, primary [{}]: [{}]", shard.node(), shard.primary(), seqNoStats); + assertThat("max_seq no on " + shard.node() + " is wrong", seqNoStats.getMaxSeqNo(), equalTo(expectMaxSeqNo)); assertThat( - "localCheckpoint no on " + shard.getNode() + " is wrong", + "localCheckpoint no on " + shard.node() + " is wrong", seqNoStats.getLocalCheckpoint(), equalTo(expectMaxSeqNo) ); assertThat( - "globalCheckpoint no on " + shard.getNode() + " is wrong", + "globalCheckpoint no on " + shard.node() + " is wrong", seqNoStats.getGlobalCheckpoint(), equalTo(expectedGlobalCkp) ); @@ -529,24 +528,24 @@ public void setMasterNodeId(String id) { } public void add(Node node) { - put(node.getId(), node); + put(node.id(), node); } public List getNewNodes() { Version bwcVersion = getBWCVersion(); - return values().stream().filter(n -> n.getVersion().after(bwcVersion)).collect(Collectors.toList()); + return values().stream().filter(n -> n.version().after(bwcVersion)).collect(Collectors.toList()); } public List getBWCNodes() { Version bwcVersion = getBWCVersion(); - return values().stream().filter(n -> n.getVersion().equals(bwcVersion)).collect(Collectors.toList()); + return values().stream().filter(n -> n.version().equals(bwcVersion)).collect(Collectors.toList()); } public Version getBWCVersion() { if (isEmpty()) { throw new IllegalStateException("no nodes available"); } - return Version.fromId(values().stream().map(node -> node.getVersion().id).min(Integer::compareTo).get()); + return Version.fromId(values().stream().map(node -> 
node.version().id).min(Integer::compareTo).get()); } public Node getSafe(String id) { @@ -568,67 +567,7 @@ public String toString() { } } - static final class Node { - private final String id; - private final String nodeName; - private final Version version; - private final HttpHost publishAddress; - - Node(String id, String nodeName, Version version, HttpHost publishAddress) { - this.id = id; - this.nodeName = nodeName; - this.version = version; - this.publishAddress = publishAddress; - } - - public String getId() { - return id; - } - - public String getNodeName() { - return nodeName; - } - - public HttpHost getPublishAddress() { - return publishAddress; - } - - public Version getVersion() { - return version; - } - - @Override - public String toString() { - return "Node{" + "id='" + id + '\'' + ", nodeName='" + nodeName + '\'' + ", version=" + version + '}'; - } - } + record Node(String id, String nodeName, Version version, HttpHost publishAddress) {} - final class Shard { - private final Node node; - private final boolean Primary; - private final SeqNoStats seqNoStats; - - Shard(Node node, boolean primary, SeqNoStats seqNoStats) { - this.node = node; - Primary = primary; - this.seqNoStats = seqNoStats; - } - - public Node getNode() { - return node; - } - - public boolean isPrimary() { - return Primary; - } - - public SeqNoStats getSeqNoStats() { - return seqNoStats; - } - - @Override - public String toString() { - return "Shard{" + "node=" + node + ", Primary=" + Primary + ", seqNoStats=" + seqNoStats + '}'; - } - } + record Shard(Node node, boolean primary, SeqNoStats seqNoStats) {} } diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java index 86e85b98dbe37..758f3557b991a 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java @@ -30,8 +30,10 @@ private int indexDocs(int numDocs, int id) throws Exception { final Request request = new Request("POST", "/_bulk"); final StringBuilder builder = new StringBuilder(); for (int i = 0; i < numDocs; ++i) { - builder.append("{ \"index\" : { \"_index\" : \"" + index + "\", \"_id\": \"" + id++ + "\" } }\n"); - builder.append("{\"str_value\" : \"s" + i + "\"}\n"); + builder.append(""" + { "index" : { "_index" : "%s", "_id": "%s" } } + {"str_value" : "s%s"} + """.formatted(index, id++, i)); } request.setJsonEntity(builder.toString()); assertOK(client().performRequest(request)); @@ -60,9 +62,17 @@ public void testSingleValuedString() throws Exception { private void assertNumRareTerms(int maxDocs, int rareTerms) throws IOException { final Request request = new Request("POST", index + "/_search"); - request.setJsonEntity( - "{\"aggs\" : {\"rareTerms\" : {\"rare_terms\" : {\"field\" : \"str_value.keyword\", \"max_doc_count\" : " + maxDocs + "}}}}" - ); + request.setJsonEntity(""" + { + "aggs": { + "rareTerms": { + "rare_terms": { + "field": "str_value.keyword", + "max_doc_count": %s + } + } + } + }""".formatted(maxDocs)); final Response response = client().performRequest(request); assertOK(response); final Object o = XContentMapValues.extractValue("aggregations.rareTerms.buckets", responseAsMap(response)); diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java index 
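The two one-line records above replace roughly sixty lines of constructor, accessor and toString boilerplate: a record derives the canonical constructor, one accessor named exactly after each component, plus equals, hashCode and toString, and the oddly capitalized Primary field of the old Shard class disappears as a bonus. A standalone illustration (String stands in for Version and HttpHost so the sketch runs without the ES classpath):

    public class RecordSketch {
        record Node(String id, String nodeName, String version, String publishAddress) {}

        public static void main(String[] args) {
            Node node = new Node("abc123", "node-0", "7.17.0", "127.0.0.1:9200");
            System.out.println(node.nodeName()); // generated accessor, no get prefix
            System.out.println(node); // Node[id=abc123, nodeName=node-0, version=7.17.0, publishAddress=127.0.0.1:9200]
        }
    }

One observable difference worth noting: the generated toString includes every component, while the old hand-written Node.toString omitted publishAddress.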
1908a2a473e96..ef2686409f236 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java @@ -50,8 +50,8 @@ public void prepareTestData() throws IOException { allNodes = new ArrayList<>(); allNodes.addAll(nodes.getBWCNodes()); allNodes.addAll(nodes.getNewNodes()); - bwcVersion = nodes.getBWCNodes().get(0).getVersion(); - newVersion = nodes.getNewNodes().get(0).getVersion(); + bwcVersion = nodes.getBWCNodes().get(0).version(); + newVersion = nodes.getNewNodes().get(0).version(); if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) { createIndex( @@ -71,9 +71,7 @@ public void prepareTestData() throws IOException { } public void testMinVersionAsNewVersion() throws Exception { - try ( - RestClient client = buildClient(restClientSettings(), allNodes.stream().map(Node::getPublishAddress).toArray(HttpHost[]::new)) - ) { + try (RestClient client = buildClient(restClientSettings(), allNodes.stream().map(Node::publishAddress).toArray(HttpHost[]::new))) { Request newVersionRequest = new Request( "POST", index + "/_search?min_compatible_shard_node=" + newVersion + "&ccs_minimize_roundtrips=false" @@ -84,32 +82,23 @@ public void testMinVersionAsNewVersion() throws Exception { responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.INTERNAL_SERVER_ERROR.getStatus()) ); - assertThat( - responseException.getMessage(), - containsString("{\"error\":{\"root_cause\":[],\"type\":\"search_phase_execution_exception\"") - ); - assertThat( - responseException.getMessage(), - containsString( - "caused_by\":{\"type\":\"version_mismatch_exception\"," - + "\"reason\":\"One of the shards is incompatible with the required minimum version [" - + newVersion - + "]\"" - ) - ); + assertThat(responseException.getMessage(), containsString(""" + {"error":{"root_cause":[],"type":"search_phase_execution_exception\"""")); + assertThat(responseException.getMessage(), containsString(""" + caused_by":{"type":"version_mismatch_exception",\ + "reason":"One of the shards is incompatible with the required minimum version [%s]\"""".formatted(newVersion))); }); } } public void testMinVersionAsOldVersion() throws Exception { - try ( - RestClient client = buildClient(restClientSettings(), allNodes.stream().map(Node::getPublishAddress).toArray(HttpHost[]::new)) - ) { + try (RestClient client = buildClient(restClientSettings(), allNodes.stream().map(Node::publishAddress).toArray(HttpHost[]::new))) { Request oldVersionRequest = new Request( "POST", index + "/_search?min_compatible_shard_node=" + bwcVersion + "&ccs_minimize_roundtrips=false" ); - oldVersionRequest.setJsonEntity("{\"query\":{\"match_all\":{}},\"_source\":false}"); + oldVersionRequest.setJsonEntity(""" + {"query":{"match_all":{}},"_source":false}"""); assertBusy(() -> { assertWithBwcVersionCheck(() -> { Response response = client.performRequest(oldVersionRequest); @@ -127,9 +116,7 @@ public void testMinVersionAsOldVersion() throws Exception { } public void testCcsMinimizeRoundtripsIsFalse() throws Exception { - try ( - RestClient client = buildClient(restClientSettings(), allNodes.stream().map(Node::getPublishAddress).toArray(HttpHost[]::new)) - ) { + try (RestClient client = buildClient(restClientSettings(), allNodes.stream().map(Node::publishAddress).toArray(HttpHost[]::new))) { Version version = randomBoolean() ? 
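Several request bodies above combine a text block with formatted(), the instance-method spelling of String.format introduced alongside text blocks; note also the escaped \" before a closing delimiter, which keeps a literal quote from merging into it. A sketch of the substitution (the id value is arbitrary):

    public class FormattedSketch {
        public static void main(String[] args) {
            String body = """
                {"query": {"ids": {"values": ["%s"]}}}""".formatted(7);
            System.out.println(body); // {"query": {"ids": {"values": ["7"]}}}
        }
    }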
newVersion : bwcVersion; Request request = new Request( @@ -143,10 +130,9 @@ public void testCcsMinimizeRoundtripsIsFalse() throws Exception { responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()) ); - assertThat( - responseException.getMessage(), - containsString("{\"error\":{\"root_cause\":[{\"type\":\"action_request_validation_exception\"") - ); + assertThat(responseException.getMessage(), containsString(""" + {"error":{"root_cause":[{"type":"action_request_validation_exception"\ + """)); assertThat( responseException.getMessage(), containsString( diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 9f2fb4d514da8..41d0d0c309aa8 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -178,13 +178,23 @@ private static void indexDocuments(String idPrefix) throws IOException, Interrup int numShards = randomIntBetween(1, 5); CreateIndexRequest createIndexRequest = new CreateIndexRequest(INDEX_NAME); createIndexRequest.settings(Settings.builder().put("index.number_of_shards", numShards).put("index.number_of_replicas", 0)); - createIndexRequest.mapping( - "{\"properties\":{" - + "\"id\":{\"type\":\"keyword\"}," - + "\"suggest\":{\"type\":\"completion\"}," - + "\"join\":{\"type\":\"join\", \"relations\": {\"question\":\"answer\"}}}}", - XContentType.JSON - ); + createIndexRequest.mapping(""" + { + "properties": { + "id": { + "type": "keyword" + }, + "suggest": { + "type": "completion" + }, + "join": { + "type": "join", + "relations": { + "question": "answer" + } + } + } + }""", XContentType.JSON); CreateIndexResponse createIndexResponse = restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); @@ -830,8 +840,7 @@ private static void assertAggs(SearchResponse response) { assertNotNull(response.getAggregations()); List aggregations = response.getAggregations().asList(); for (Aggregation aggregation : aggregations) { - if (aggregation instanceof MultiBucketsAggregation) { - MultiBucketsAggregation multiBucketsAggregation = (MultiBucketsAggregation) aggregation; + if (aggregation instanceof MultiBucketsAggregation multiBucketsAggregation) { assertThat( "agg " + multiBucketsAggregation.getName() + " has 0 buckets", multiBucketsAggregation.getBuckets().size(), diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/100_tsdb.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/100_tsdb.yml index a3c73d7f434d6..274fc92e64d2c 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/100_tsdb.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/100_tsdb.yml @@ -2,8 +2,8 @@ setup: # Create a local tsdb index with a tsid the doesn't overlap with the remote cluster. 
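The CCSDuelIT hunk above also adopts Java 16 pattern matching for instanceof: the type test, the cast and the temporary variable collapse into one construct, and the binding is in scope only where the match succeeded. A sketch with a stand-in type for the aggregation:

    import java.util.List;

    public class PatternMatchSketch {
        public static void main(String[] args) {
            Object aggregation = List.of("a", "b"); // stands in for the Aggregation
            if (aggregation instanceof List<?> buckets) { // binds 'buckets' on success
                System.out.println("buckets: " + buckets.size()); // buckets: 2
            }
        }
    }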
- skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -13,6 +13,9 @@ setup: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -57,8 +60,8 @@ setup: --- teardown: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.delete: @@ -68,8 +71,8 @@ teardown: --- aggregate tsid: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml index 4f46d103c238f..6b6794e6919b4 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -156,8 +156,8 @@ --- tsdb: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -167,6 +167,9 @@ tsdb: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 9879604cbf24c..d7b6773e461ec 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -48,7 +48,13 @@ */ public class SpawnerNoBootstrapTests extends LuceneTestCase { - private static final String CONTROLLER_SOURCE = "#!/bin/bash\n" + "\n" + "echo I am alive\n" + "\n" + "read SOMETHING\n"; + private static final String CONTROLLER_SOURCE = """ + #!/bin/bash + + echo I am alive + + read SOMETHING + """; /** * Simplest case: a module with no controller daemon. 
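The YAML fixes above do two things: bump the skip version because the feature ships in 8.1.0 rather than 8.0.0, and pin an explicit time_series start_time/end_time window on the index. The window matters because a time-series index only accepts documents whose timestamp falls inside it; roughly the following check (dates taken from the YAML, the logic a simplification of the server-side validation):

    import java.time.Instant;

    public class TsdbWindowSketch {
        public static void main(String[] args) {
            Instant start = Instant.parse("2021-04-28T00:00:00Z");
            Instant end = Instant.parse("2021-04-29T00:00:00Z");
            Instant timestamp = Instant.parse("2021-04-28T18:00:00Z");
            // Accepted iff start <= timestamp < end.
            boolean accepted = !timestamp.isBefore(start) && timestamp.isBefore(end);
            System.out.println(accepted); // true
        }
    }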
diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveGenerateInitialCredentialsTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveGenerateInitialCredentialsTests.java index c0fb264634712..5713f3670bbec 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveGenerateInitialCredentialsTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveGenerateInitialCredentialsTests.java @@ -55,9 +55,9 @@ public void test20NoAutoGenerationWhenAutoConfigurationDisabled() throws Excepti assumeTrue("expect command isn't on Windows", distribution.platform != Distribution.Platform.WINDOWS); ServerUtils.disableSecurityAutoConfiguration(installation); Shell.Result result = awaitElasticsearchStartupWithResult(runElasticsearchStartCommand(null, false, true)); - assertThat(parseElasticPassword(result.stdout), nullValue()); - assertThat(parseKibanaToken(result.stdout), nullValue()); - assertThat(parseFingerprint(result.stdout), nullValue()); + assertThat(parseElasticPassword(result.stdout()), nullValue()); + assertThat(parseKibanaToken(result.stdout()), nullValue()); + assertThat(parseFingerprint(result.stdout()), nullValue()); stopElasticsearch(); } @@ -79,10 +79,10 @@ public void test40VerifyAutogeneratedCredentials() throws Exception { stopElasticsearch(); ServerUtils.enableSecurityAutoConfiguration(installation); Shell.Result result = awaitElasticsearchStartupWithResult(runElasticsearchStartCommand(null, false, true)); - assertThat(parseElasticPassword(result.stdout), notNullValue()); - assertThat(parseKibanaToken(result.stdout), notNullValue()); - assertThat(parseFingerprint(result.stdout), notNullValue()); - String response = makeRequestAsElastic("https://localhost:9200", parseElasticPassword(result.stdout)); + assertThat(parseElasticPassword(result.stdout()), notNullValue()); + assertThat(parseKibanaToken(result.stdout()), notNullValue()); + assertThat(parseFingerprint(result.stdout()), notNullValue()); + String response = makeRequestAsElastic("https://localhost:9200", parseElasticPassword(result.stdout())); assertThat(response, containsString("You Know, for Search")); } @@ -92,9 +92,9 @@ public void test50CredentialAutogenerationOnlyOnce() throws Exception { assumeTrue("expect command isn't on Windows", distribution.platform != Distribution.Platform.WINDOWS); stopElasticsearch(); Shell.Result result = awaitElasticsearchStartupWithResult(runElasticsearchStartCommand(null, false, true)); - assertThat(parseElasticPassword(result.stdout), nullValue()); - assertThat(parseKibanaToken(result.stdout), nullValue()); - assertThat(parseFingerprint(result.stdout), nullValue()); + assertThat(parseElasticPassword(result.stdout()), nullValue()); + assertThat(parseKibanaToken(result.stdout()), nullValue()); + assertThat(parseFingerprint(result.stdout()), nullValue()); } private String parseElasticPassword(String output) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java index 80958354531a6..2270ceeeb5fbc 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.packaging.test; +import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.http.client.fluent.Request; import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.FileUtils; @@ -78,7 
+80,7 @@ public void test20PluginsListWithNoPlugins() throws Exception { final Installation.Executables bin = installation.executables(); final Result r = bin.pluginTool.run("list"); - assertThat(r.stdout, emptyString()); + assertThat(r.stdout(), emptyString()); } public void test30MissingBundledJdk() throws Exception { @@ -93,8 +95,8 @@ public void test30MissingBundledJdk() throws Exception { } // ask for elasticsearch version to quickly exit if java is actually found (ie test failure) final Result runResult = sh.runIgnoreExitCode(bin.elasticsearch.toString() + " -V"); - assertThat(runResult.exitCode, is(1)); - assertThat(runResult.stderr, containsString("could not find java in bundled JDK")); + assertThat(runResult.exitCode(), is(1)); + assertThat(runResult.stderr(), containsString("could not find java in bundled JDK")); } finally { if (distribution().hasJdk) { mv(relocatedJdk, installation.bundledJdk); @@ -108,8 +110,8 @@ public void test31BadJavaHome() throws Exception { // ask for elasticsearch version to quickly exit if java is actually found (ie test failure) final Result runResult = sh.runIgnoreExitCode(bin.elasticsearch.toString() + " -V"); - assertThat(runResult.exitCode, is(1)); - assertThat(runResult.stderr, containsString("could not find java in ES_JAVA_HOME")); + assertThat(runResult.exitCode(), is(1)); + assertThat(runResult.stderr(), containsString("could not find java in ES_JAVA_HOME")); } public void test32SpecialCharactersInJdkPath() throws Exception { @@ -123,7 +125,7 @@ public void test32SpecialCharactersInJdkPath() throws Exception { mv(installation.bundledJdk, relocatedJdk); // ask for elasticsearch version to avoid starting the app final Result runResult = sh.run(bin.elasticsearch.toString() + " -V"); - assertThat(runResult.stdout, startsWith("Version: ")); + assertThat(runResult.stdout(), startsWith("Version: ")); } finally { mv(relocatedJdk, installation.bundledJdk); } @@ -258,7 +260,9 @@ public void test51AutoConfigurationWithPasswordProtectedKeystore() throws Except Shell.Result result = runElasticsearchStartCommand("some-wrong-password-here", false, false); assertElasticsearchFailure(result, "Provided keystore password was incorrect", null); verifySecurityNotAutoConfigured(installation); - + if (RandomizedTest.randomBoolean()) { + ServerUtils.addSettingToExistingConfiguration(installation, "node.name", "my-custom-random-node-name-here"); + } awaitElasticsearchStartup(runElasticsearchStartCommand(password, true, true)); verifySecurityAutoConfigured(installation); @@ -278,7 +282,9 @@ public void test52AutoConfigurationOnWindows() throws Exception { ); sh.chown(installation.config, installation.getOwner()); FileUtils.assertPathsDoNotExist(installation.data); - + if (RandomizedTest.randomBoolean()) { + ServerUtils.addSettingToExistingConfiguration(installation, "node.name", "my-custom-random-node-name-here"); + } startElasticsearch(); verifySecurityAutoConfigured(installation); stopElasticsearch(); @@ -294,11 +300,11 @@ public void test60StartAndStop() throws Exception { public void test61EsJavaHomeOverride() throws Exception { Platforms.onLinux(() -> { - String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout().trim(); sh.getEnv().put("ES_JAVA_HOME", systemJavaHome1); }); Platforms.onWindows(() -> { - final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout().trim(); 
sh.getEnv().put("ES_JAVA_HOME", systemJavaHome1); }); @@ -313,13 +319,13 @@ public void test61EsJavaHomeOverride() throws Exception { public void test62JavaHomeIgnored() throws Exception { assumeTrue(distribution().hasJdk); Platforms.onLinux(() -> { - String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout().trim(); sh.getEnv().put("JAVA_HOME", systemJavaHome1); // ensure that ES_JAVA_HOME is not set for the test sh.getEnv().remove("ES_JAVA_HOME"); }); Platforms.onWindows(() -> { - final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout().trim(); sh.getEnv().put("JAVA_HOME", systemJavaHome1); // ensure that ES_JAVA_HOME is not set for the test sh.getEnv().remove("ES_JAVA_HOME"); @@ -327,7 +333,7 @@ public void test62JavaHomeIgnored() throws Exception { final Installation.Executables bin = installation.executables(); final Result runResult = sh.run(bin.elasticsearch.toString() + " -V"); - assertThat(runResult.stderr, containsString("warning: ignoring JAVA_HOME=" + systemJavaHome + "; using bundled JDK")); + assertThat(runResult.stderr(), containsString("warning: ignoring JAVA_HOME=" + systemJavaHome + "; using bundled JDK")); startElasticsearch(); runElasticsearchTests(); @@ -345,11 +351,11 @@ public void test63BundledJdkRemoved() throws Exception { try { mv(installation.bundledJdk, relocatedJdk); Platforms.onLinux(() -> { - String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout().trim(); sh.getEnv().put("ES_JAVA_HOME", systemJavaHome1); }); Platforms.onWindows(() -> { - final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout().trim(); sh.getEnv().put("ES_JAVA_HOME", systemJavaHome1); }); @@ -380,7 +386,7 @@ public void test64JavaHomeWithSpecialCharacters() throws Exception { String pluginListCommand = installation.bin + "/elasticsearch-plugin list"; Result result = sh.run(pluginListCommand); - assertThat(result.exitCode, equalTo(0)); + assertThat(result.exitCode(), equalTo(0)); } finally { // clean up sym link @@ -394,7 +400,7 @@ public void test64JavaHomeWithSpecialCharacters() throws Exception { // Create temporary directory with a space and link to real java home String testJavaHome = Paths.get("/tmp", "java home").toString(); try { - final String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + final String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout().trim(); sh.run("ln -s \"" + systemJavaHome + "\" \"" + testJavaHome + "\""); sh.getEnv().put("ES_JAVA_HOME", testJavaHome); @@ -405,7 +411,7 @@ public void test64JavaHomeWithSpecialCharacters() throws Exception { String pluginListCommand = installation.bin + "/elasticsearch-plugin list"; Result result = sh.run(pluginListCommand); - assertThat(result.exitCode, equalTo(0)); + assertThat(result.exitCode(), equalTo(0)); } finally { FileUtils.rm(Paths.get(testJavaHome)); } @@ -534,7 +540,8 @@ public void test74CustomJvmOptionsTotalMemoryOverride() throws Exception { public void test80RelativePathConf() throws Exception { withCustomConfig(tempConf -> { - append(tempConf.resolve("elasticsearch.yml"), "node.name: relative"); + ServerUtils.removeSettingFromExistingConfiguration(tempConf, "node.name"); + ServerUtils.addSettingToExistingConfiguration(tempConf, 
"node.name", "relative"); startElasticsearch(); final String nodesResponse = makeRequest("https://localhost:9200/_nodes"); @@ -550,12 +557,12 @@ public void test90SecurityCliPackaging() throws Exception { assertThat(installation.lib.resolve("tools").resolve("security-cli"), fileExists()); final Platforms.PlatformAction action = () -> { Result result = sh.run(bin.certutilTool + " --help"); - assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack")); + assertThat(result.stdout(), containsString("Simplifies certificate creation for use with the Elastic Stack")); // Ensure that the exit code from the java command is passed back up through the shell script result = sh.runIgnoreExitCode(bin.certutilTool + " invalid-command"); - assertThat(result.exitCode, is(not(0))); - assertThat(result.stderr, containsString("Unknown command [invalid-command]")); + assertThat(result.exitCode(), is(not(0))); + assertThat(result.stderr(), containsString("Unknown command [invalid-command]")); }; Platforms.onLinux(action); Platforms.onWindows(action); @@ -566,7 +573,7 @@ public void test91ElasticsearchShardCliPackaging() throws Exception { Platforms.PlatformAction action = () -> { final Result result = sh.run(bin.shardTool + " -h"); - assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); + assertThat(result.stdout(), containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); }; Platforms.onLinux(action); @@ -578,7 +585,7 @@ public void test92ElasticsearchNodeCliPackaging() throws Exception { Platforms.PlatformAction action = () -> { final Result result = sh.run(bin.nodeTool + " -h"); - assertThat(result.stdout, containsString("A CLI tool to do unsafe cluster and index manipulations on current node")); + assertThat(result.stdout(), containsString("A CLI tool to do unsafe cluster and index manipulations on current node")); }; Platforms.onLinux(action); @@ -588,7 +595,6 @@ public void test92ElasticsearchNodeCliPackaging() throws Exception { public void test93ElasticsearchNodeCustomDataPathAndNotEsHomeWorkDir() throws Exception { Path relativeDataPath = installation.data.relativize(installation.home); append(installation.config("elasticsearch.yml"), "path.data: " + relativeDataPath); - sh.setWorkingDirectory(getRootTempDir()); startElasticsearch(); @@ -600,7 +606,7 @@ public void test93ElasticsearchNodeCustomDataPathAndNotEsHomeWorkDir() throws Ex } Result result = sh.run("echo y | " + nodeTool + " unsafe-bootstrap"); - assertThat(result.stdout, containsString("Master node was successfully bootstrapped")); + assertThat(result.stdout(), containsString("Master node was successfully bootstrapped")); } public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception { @@ -610,15 +616,15 @@ public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception Platforms.PlatformAction action = () -> { Result result = sh.run(bin.certutilTool + " -h"); - assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack")); + assertThat(result.stdout(), containsString("Simplifies certificate creation for use with the Elastic Stack")); result = sh.run(bin.syskeygenTool + " -h"); - assertThat(result.stdout, containsString("system key tool")); + assertThat(result.stdout(), containsString("system key tool")); result = sh.run(bin.setupPasswordsTool + " -h"); - assertThat(result.stdout, containsString("Sets the passwords for reserved users")); + 
assertThat(result.stdout(), containsString("Sets the passwords for reserved users")); result = sh.run(bin.usersTool + " -h"); - assertThat(result.stdout, containsString("Manages elasticsearch file users")); + assertThat(result.stdout(), containsString("Manages elasticsearch file users")); result = sh.run(bin.serviceTokensTool + " -h"); - assertThat(result.stdout, containsString("Manages elasticsearch service account file-tokens")); + assertThat(result.stdout(), containsString("Manages elasticsearch service account file-tokens")); }; Platforms.onLinux(action); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java index a5f7f6fa66649..03b820de58668 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.packaging.test; import org.apache.http.client.fluent.Request; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Platforms; import org.elasticsearch.packaging.util.ServerUtils; @@ -54,7 +55,7 @@ public void test10Install() throws Exception { public void test20Help() { Shell.Result result = installation.executables().certgenTool.run("--help"); - assertThat(result.stdout, containsString("Simplifies certificate creation")); + assertThat(result.stdout(), containsString("Simplifies certificate creation")); } public void test30Generate() throws Exception { @@ -96,33 +97,33 @@ public void test40RunWithCert() throws Exception { final String keyPath = escapePath(installation.config("certs/mynode/mynode.key")); final String certPath = escapePath(installation.config("certs/mynode/mynode.crt")); final String caCertPath = escapePath(installation.config("certs/ca/ca.crt")); - + final Settings settings = Settings.builder().loadFromPath(installation.config("elasticsearch.yml")).build(); // Replace possibly auto-configured TLS settings with ones pointing to the material generated with certgen // (we do disable auto-configuration above but for packaged installations TLS auto-config happens on installation time and is // not affected by this setting - final List newTlsConfig = List.of( - "node.name: mynode", - "xpack.security.transport.ssl.key: " + keyPath, - "xpack.security.transport.ssl.certificate: " + certPath, - "xpack.security.transport.ssl.certificate_authorities: [\"" + caCertPath + "\"]", - "xpack.security.http.ssl.key: " + keyPath, - "xpack.security.http.ssl.certificate: " + certPath, - "xpack.security.http.ssl.certificate_authorities: [\"" + caCertPath + "\"]", - "xpack.security.transport.ssl.enabled: true", - "xpack.security.http.ssl.enabled: true" + final Settings newSettings = Settings.builder() + .put( + settings.filter(k -> k.startsWith("xpack.security") == false) + .filter(k -> k.equals("node.name") == false) + .filter(k -> k.equals("http.host") == false) + .filter(k -> k.equals("cluster.initial_master_nodes") == false) + + ) + .put("node.name", "mynode") + .put("xpack.security.transport.ssl.key", keyPath) + .put("xpack.security.transport.ssl.certificate", certPath) + .put("xpack.security.transport.ssl.certificate_authorities", caCertPath) + .put("xpack.security.http.ssl.key", keyPath) + .put("xpack.security.http.ssl.certificate", certPath) + .putList("xpack.security.http.ssl.certificate_authorities", caCertPath) + 
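The CertGenCliTests change in progress here swaps line-prefix filtering of elasticsearch.yml for filtering of parsed Settings keys, which also catches entries written with unusual spacing; the file-rewrite shape itself is unchanged. A dependency-free sketch of that shape using java.nio (path and keys are illustrative, not from the diff):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;
    import java.util.List;
    import java.util.stream.Collectors;

    public class ConfigRewriteSketch {
        public static void main(String[] args) throws Exception {
            Path config = Files.createTempFile("elasticsearch", ".yml");
            Files.write(config, List.of("node.name: original", "http.host: 0.0.0.0"));

            // Drop the keys about to be replaced, keep everything else.
            List<String> kept = Files.readAllLines(config).stream()
                .filter(line -> line.startsWith("node.name:") == false)
                .collect(Collectors.toList());
            kept.add("node.name: mynode");
            kept.add("xpack.security.http.ssl.enabled: true");

            // TRUNCATE_EXISTING mirrors the Files.write call in the test.
            Files.write(config, kept, StandardOpenOption.TRUNCATE_EXISTING);
            Files.readAllLines(config).forEach(System.out::println);
        }
    }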
.put("xpack.security.transport.ssl.enabled", true) + .put("xpack.security.http.ssl.enabled", true) + .build(); + Files.write( + installation.config("elasticsearch.yml"), + newSettings.keySet().stream().map(k -> k + ": " + newSettings.get(k)).collect(Collectors.toList()), + TRUNCATE_EXISTING ); - List existingConfig = Files.readAllLines(installation.config("elasticsearch.yml")); - List newConfig = existingConfig.stream() - .filter(l -> l.startsWith("node.name:") == false) - .filter(l -> l.startsWith("xpack.security.transport.ssl.") == false) - .filter(l -> l.startsWith("xpack.security.http.ssl.") == false) - .filter(l -> l.startsWith("xpack.security.enabled") == false) - .filter(l -> l.startsWith("http.host") == false) - .filter(l -> l.startsWith("cluster.initial_master_nodes") == false) - .collect(Collectors.toList()); - newConfig.addAll(newTlsConfig); - - Files.write(installation.config("elasticsearch.yml"), newConfig, TRUNCATE_EXISTING); assertWhileRunning(() -> { final String password = setElasticPassword(); @@ -133,7 +134,7 @@ public void test40RunWithCert() throws Exception { private String setElasticPassword() { Shell.Result result = installation.executables().resetPasswordTool.run("--auto --batch --silent --username elastic", null); - return result.stdout; + return result.stdout(); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java index 6dd68416c4298..87d6aee2798c2 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java @@ -37,6 +37,10 @@ public void test20HostnameSubstitution() throws Exception { FileUtils.append(confPath.resolve("elasticsearch.yml"), "node.name: ${HOSTNAME}"); if (distribution.isPackage()) { append(installation.envFile, "HOSTNAME=mytesthost"); + // In packages, we would have set cluster.initial_master_nodes pointing to the original HOSTNAME upon installation + // We need to update that if we change HOSTNAME since node.name points to that, otherwise the cluster can't form + ServerUtils.removeSettingFromExistingConfiguration(confPath, "cluster.initial_master_nodes"); + ServerUtils.addSettingToExistingConfiguration(confPath, "cluster.initial_master_nodes", "[\"${HOSTNAME}\"]"); } // security auto-config requires that the archive owner and the node process user be the same Platforms.onWindows(() -> sh.chown(confPath, installation.getOwner())); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java index 38221bc0c1f6a..3f5090d42dcb4 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java @@ -27,11 +27,11 @@ public void test10Install() throws Exception { public void test20Help() throws Exception { Shell.Result result = installation.executables().cronevalTool.run("--help"); - assertThat(result.stdout, containsString("Validates and evaluates a cron expression")); + assertThat(result.stdout(), containsString("Validates and evaluates a cron expression")); } public void test30Run() throws Exception { Shell.Result result = installation.executables().cronevalTool.run("'0 0 20 ? 
* MON-THU' -c 2"); - assertThat(result.stdout, containsString("Valid!")); + assertThat(result.stdout(), containsString("Valid!")); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java index 37277cecbbdb2..793bb7ab6802c 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java @@ -29,10 +29,13 @@ public static void filterDistros() { public void test05CheckLintian() { String extraArgs = ""; - if (sh.run("lintian --help").stdout.contains("fail-on-warnings")) { - extraArgs = "--fail-on-warnings "; + final String helpText = sh.run("lintian --help").stdout(); + if (helpText.contains("fail-on-warnings")) { + extraArgs = "--fail-on-warnings"; + } else if (helpText.contains("--fail-on error")) { + extraArgs = "--fail-on warning"; } - sh.run("lintian " + extraArgs + FileUtils.getDistributionFile(distribution())); + sh.run("lintian %s %s".formatted(extraArgs, FileUtils.getDistributionFile(distribution()))); } public void test06Dependencies() { @@ -41,9 +44,9 @@ public void test06Dependencies() { final Shell.Result result = sh.run("dpkg -I " + getDistributionFile(distribution())); - TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout).find()); + TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout()).find()); String oppositePackageName = "elasticsearch-oss"; - TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout).find()); + TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout()).find()); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java index f30bcdbe4e86d..4f2048aa00438 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java @@ -82,6 +82,6 @@ public void test30Purge() throws Exception { assertPathsDoNotExist(installation.config, installation.envFile); - assertThat(packageStatus(distribution()).exitCode, is(1)); + assertThat(packageStatus(distribution()).exitCode(), is(1)); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 33c9ec77e7821..dc92789be15ea 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -20,8 +20,8 @@ import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; import org.elasticsearch.packaging.util.docker.DockerRun; +import org.elasticsearch.packaging.util.docker.DockerShell; import org.elasticsearch.packaging.util.docker.MockServer; -import org.hamcrest.Matcher; import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; @@ -35,7 +35,6 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.StringJoiner; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -76,6 +75,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; @@ -147,7 +147,9 @@ public void test011SecurityEnabledStatus() throws Exception { */ public void test012SecurityCanBeDisabled() throws Exception { // restart container with security disabled - runContainer(distribution(), builder().envVar("xpack.security.enabled", "false")); + // We need to set discovery to single-node as with security disabled, autoconfiguration won't run and we won't set + // cluster.initial_master_nodes + runContainer(distribution(), builder().envVar("xpack.security.enabled", "false").envVar("discovery.type", "single-node")); waitForElasticsearch(installation); final int unauthStatusCode = ServerUtils.makeRequestAndGetStatus(Request.Get("http://localhost:9200"), null, null, null); assertThat(unauthStatusCode, equalTo(200)); @@ -165,32 +167,13 @@ public void test020PluginsListWithNoPlugins() { final Installation.Executables bin = installation.executables(); final Result r = sh.run(bin.pluginTool + " list"); - assertThat("Expected no plugins to be listed", r.stdout, emptyString()); - } - - /** - * Check that Cloud images bundle a selection of plugins. - */ - public void test021PluginsListWithDefaultCloudPlugins() { - assumeTrue( - "Only applies to Cloud images", - distribution.packaging == Packaging.DOCKER_CLOUD || distribution().packaging == Packaging.DOCKER_CLOUD_ESS - ); - - final Installation.Executables bin = installation.executables(); - final List plugins = sh.run(bin.pluginTool + " list").stdout.lines().collect(Collectors.toList()); - - assertThat( - "Expected standard plugins to be listed", - plugins, - equalTo(List.of("repository-azure", "repository-gcs", "repository-s3")) - ); + assertThat("Expected no plugins to be listed", r.stdout(), emptyString()); } /** * Check that a plugin can be installed without special permissions. */ - public void test022InstallPlugin() { + public void test021InstallPlugin() { runContainer( distribution(), builder().envVar("ELASTIC_PASSWORD", PASSWORD).volume(Path.of(EXAMPLE_PLUGIN_PATH), "/analysis-icu.zip") @@ -202,20 +185,13 @@ public void test022InstallPlugin() { final Installation.Executables bin = installation.executables(); sh.run(bin.pluginTool + " install file:///analysis-icu.zip"); - final boolean isCloudImage = distribution().packaging == Packaging.DOCKER_CLOUD - || distribution().packaging == Packaging.DOCKER_CLOUD_ESS; - - final Matcher> matcher = isCloudImage - ? containsInAnyOrder("repository-azure", "repository-gcs", "repository-s3", "analysis-icu") - : equalTo(List.of("analysis-icu")); - - assertThat("Expected installed plugins to be listed", listPlugins(), matcher); + assertThat("Expected installed plugins to be listed", listPlugins(), equalTo(List.of("analysis-icu"))); } /** * Checks that ESS images can install plugins from the local archive. */ - public void test023InstallPluginsFromLocalArchive() { + public void test022InstallPluginsFromLocalArchive() { assumeTrue("Only ESS images have a local archive", distribution().packaging == Packaging.DOCKER_CLOUD_ESS); final String plugin = "analysis-icu"; @@ -234,22 +210,13 @@ public void test023InstallPluginsFromLocalArchive() { /** * Checks that plugins can be installed by deploying a plugins config file. 
*/ - public void test024InstallPluginUsingConfigFile() { - final boolean isCloudImage = distribution().packaging == Packaging.DOCKER_CLOUD - || distribution().packaging == Packaging.DOCKER_CLOUD_ESS; - - final StringJoiner pluginsDescriptor = new StringJoiner("\n", "", "\n"); - pluginsDescriptor.add("plugins:"); - pluginsDescriptor.add(" - id: analysis-icu"); - pluginsDescriptor.add(" location: file:///analysis-icu.zip"); - if (isCloudImage) { - // The repository plugins have to be present, because (1) they are preinstalled, and (2) they - // are owned by `root` and can't be removed. - Stream.of("repository-s3", "repository-azure", "repository-gcs").forEach(plugin -> pluginsDescriptor.add(" - id: " + plugin)); - } - + public void test023InstallPluginUsingConfigFile() { final String filename = "elasticsearch-plugins.yml"; - append(tempDir.resolve(filename), pluginsDescriptor.toString()); + append(tempDir.resolve(filename), """ + plugins: + - id: analysis-icu + location: file:///analysis-icu.zip + """); // Restart the container. This will sync the plugins automatically. Also // stuff the proxy settings with garbage, so any attempt to go out to the internet would fail. The @@ -274,21 +241,15 @@ public void test024InstallPluginUsingConfigFile() { /** * Checks that ESS images can manage plugins from the local archive by deploying a plugins config file. */ - public void test025InstallPluginFromArchiveUsingConfigFile() { + public void test024InstallPluginFromArchiveUsingConfigFile() { assumeTrue("Only ESS image has a plugin archive", distribution().packaging == Packaging.DOCKER_CLOUD_ESS); - // The repository plugins have to be present, because (1) they are preinstalled, and (2) they - // are owned by `root` and can't be removed. - final String[] plugins = { "repository-s3", "repository-azure", "repository-gcs", "analysis-icu", "analysis-phonetic" }; - - final StringJoiner pluginsDescriptor = new StringJoiner("\n", "", "\n"); - pluginsDescriptor.add("plugins:"); - for (String plugin : plugins) { - pluginsDescriptor.add(" - id: " + plugin); - } - final String filename = "elasticsearch-plugins.yml"; - append(tempDir.resolve(filename), pluginsDescriptor.toString()); + append(tempDir.resolve(filename), """ + plugins: + - id: analysis-icu + - id: analysis-phonetic + """); // Restart the container. This will sync the plugins automatically. Also // stuff the proxy settings with garbage, so any attempt to go out to the internet would fail. The @@ -306,40 +267,31 @@ public void test025InstallPluginFromArchiveUsingConfigFile() { // Since ES is doing the installing, give it a chance to complete waitForElasticsearch(installation, "elastic", PASSWORD); - assertThat("List of installed plugins is incorrect", listPlugins(), containsInAnyOrder(plugins)); + assertThat("List of installed plugins is incorrect", listPlugins(), containsInAnyOrder("analysis-icu", "analysis-phonetic")); } /** * Check that when using Elasticsearch's plugins sync capability, it will use a proxy when configured to do so. * This could either be in the plugins config file, or via the standard Java system properties. 
*/ - public void test024SyncPluginsUsingProxy() { + public void test025SyncPluginsUsingProxy() { MockServer.withMockServer(mockServer -> { for (boolean useConfigFile : List.of(true, false)) { mockServer.clearExpectations(); - final StringJoiner config = new StringJoiner("\n", "", "\n"); - config.add("plugins:"); - // The repository plugins have to be present for Cloud images, because (1) they are preinstalled, and (2) they - // are owned by `root` and can't be removed. - if (distribution().packaging == Packaging.DOCKER_CLOUD || distribution().packaging == Packaging.DOCKER_CLOUD_ESS) { - for (String plugin : List.of("repository-s3", "repository-azure", "repository-gcs", "analysis-icu")) { - config.add(" - id: " + plugin); - } - } - // This is the new plugin to install. We don't use an official plugin because then Elasticsearch - // will attempt an SSL connection and that just makes everything more complicated. - config.add(" - id: my-plugin"); - config.add(" location: http://example.com/my-plugin.zip"); - - if (useConfigFile) { - config.add("proxy: mockserver:" + mockServer.getPort()); - } + final String config = """ + plugins: + # This is the new plugin to install. We don't use an official plugin because then Elasticsearch + # will attempt an SSL connection and that just makes everything more complicated. + - id: my-plugin + location: http://example.com/my-plugin.zip + %s + """.formatted(useConfigFile ? "proxy: mockserver:" + mockServer.getPort() : ""); final String filename = "elasticsearch-plugins.yml"; final Path pluginsConfigPath = tempDir.resolve(filename); deleteIfExists(pluginsConfigPath); - append(pluginsConfigPath, config.toString()); + append(pluginsConfigPath, config); final DockerRun builder = builder().volume(pluginsConfigPath, installation.config.resolve(filename)) .extraArgs("--link " + mockServer.getContainerId() + ":mockserver"); @@ -354,7 +306,7 @@ public void test024SyncPluginsUsingProxy() { final List> interactions = mockServer.getInteractions(); - assertThat(result.stderr, containsString("FileNotFoundException: http://example.com/my-plugin.zip")); + assertThat(result.stderr(), containsString("FileNotFoundException: http://example.com/my-plugin.zip")); // Now check that Elasticsearch did use the proxy server assertThat(interactions, hasSize(1)); @@ -367,16 +319,64 @@ public void test024SyncPluginsUsingProxy() { }); } + /** + * Check that attempting to install the repository plugins that have been migrated to modules succeeds, but does nothing. + */ + public void test026InstallBundledRepositoryPlugins() { + assertThat("Expected no plugins to be installed", listPlugins(), is(empty())); + + installation.executables().pluginTool.run("install repository-azure repository-gcs repository-s3"); + + assertThat("Still expected no plugins to be installed", listPlugins(), is(empty())); + + // Removal should also succeed + installation.executables().pluginTool.run("remove repository-azure repository-gcs repository-s3"); + } + + /** + * Check that attempting to install the repository plugins that have been migrated to modules succeeds + * when using a plugins config file but does nothing. + */ + public void test026InstallBundledRepositoryPluginsViaConfigFile() { + final String filename = "elasticsearch-plugins.yml"; + append(tempDir.resolve(filename), """ + plugins: + - id: repository-azure + - id: repository-gcs + - id: repository-s3 + """); + + // Restart the container. This will sync the plugins automatically. 
Also + // stuff the proxy settings with garbage, so any attempt to go out to the internet would fail. The + // sync shouldn't be doing anything anyway. + runContainer( + distribution(), + builder().volume(tempDir.resolve(filename), installation.config.resolve(filename)) + .envVar("ELASTIC_PASSWORD", PASSWORD) + .envVar( + "ES_JAVA_OPTS", + "-Dhttp.proxyHost=example.org -Dhttp.proxyPort=9999 -Dhttps.proxyHost=example.org -Dhttps.proxyPort=9999" + ) + ); + + // Since ES is doing the installing, give it a chance to complete + waitForElasticsearch(installation, "elastic", PASSWORD); + + assertThat("Expected no plugins to be installed", listPlugins(), is(empty())); + } + /** * Check that the JDK's `cacerts` file is a symlink to the copy provided by the operating system, where applicable. */ public void test040JavaUsesTheOsProvidedKeystore() { - final String path = sh.run("realpath jdk/lib/security/cacerts").stdout; + final String path = sh.run("realpath jdk/lib/security/cacerts").stdout(); if (distribution.packaging == Packaging.DOCKER_UBI || distribution.packaging == Packaging.DOCKER_IRON_BANK) { + // In these images, the `cacerts` file ought to be a symlink here assertThat(path, equalTo("/etc/pki/ca-trust/extracted/java/cacerts")); } else { - assertThat(path, equalTo("/etc/ssl/certs/java/cacerts")); + // Whereas on other images, it's a real file so the real path is the same + assertThat(path, equalTo("/usr/share/elasticsearch/jdk/lib/security/cacerts")); } } @@ -384,12 +384,10 @@ public void test040JavaUsesTheOsProvidedKeystore() { * Checks that there are Amazon trusted certificates in the cacerts keystore. */ public void test041AmazonCaCertsAreInTheKeystore() { - final String caName = distribution.packaging == Packaging.DOCKER_UBI || distribution.packaging == Packaging.DOCKER_IRON_BANK - ? "amazonrootca" - : "amazon_root_ca"; - - final boolean matches = sh.run("jdk/bin/keytool -cacerts -storepass changeit -list | grep trustedCertEntry").stdout.lines() - .anyMatch(line -> line.contains(caName)); + final boolean matches = sh.run("jdk/bin/keytool -cacerts -storepass changeit -list | grep trustedCertEntry") + .stdout() + .lines() + .anyMatch(line -> line.contains("amazonrootca")); assertTrue("Expected Amazon trusted cert in cacerts", matches); } @@ -415,6 +413,17 @@ public void test050BasicApiTests() throws Exception { runElasticsearchTestsAsElastic(PASSWORD); } + /** + * Check that the JDK uses the Cloudflare zlib, instead of the default one. 
+ */ + public void test060JavaUsesCloudflareZlib() { + waitForElasticsearch(installation, "elastic", PASSWORD); + + final String output = sh.run("bash -c 'pmap -p $(pidof java)'").stdout(); + + assertThat("Expected java to be using cloudflare-zlib", output, containsString("cloudflare-zlib")); + } + /** * Check that the default config can be overridden using a bind mount, and that env vars are respected */ @@ -422,7 +431,7 @@ public void test070BindMountCustomPathConfAndJvmOptions() throws Exception { copyFromContainer(installation.config("elasticsearch.yml"), tempDir.resolve("elasticsearch.yml")); copyFromContainer(installation.config("elasticsearch.keystore"), tempDir.resolve("elasticsearch.keystore")); copyFromContainer(installation.config("log4j2.properties"), tempDir.resolve("log4j2.properties")); - final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"tls_auto_config_*\""); + final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"certs\""); final String autoConfigurationDirName = autoConfigurationDir.getFileName().toString(); copyFromContainer(autoConfigurationDir, tempDir.resolve(autoConfigurationDirName)); @@ -441,11 +450,15 @@ public void test070BindMountCustomPathConfAndJvmOptions() throws Exception { Files.setPosixFilePermissions(tempDir.resolve(autoConfigurationDirName), p750); // Restart the container + // We need to set discovery to single-node as autoconfiguration has already run when the node started the first time + // cluster.initial_master_nodes is set to the name of the original docker container + ServerUtils.removeSettingFromExistingConfiguration(tempDir, "cluster.initial_master_nodes"); runContainer( distribution(), builder().volume(tempDir, "/usr/share/elasticsearch/config") .envVar("ES_JAVA_OPTS", "-XX:-UseCompressedOops") .envVar("ELASTIC_PASSWORD", PASSWORD) + .envVar("discovery.type", "single-node") ); waitForElasticsearch(installation, "elastic", PASSWORD); @@ -514,7 +527,7 @@ public void test072RunEsAsDifferentUserAndGroup() throws Exception { copyFromContainer(installation.config("jvm.options"), tempEsConfigDir); copyFromContainer(installation.config("elasticsearch.keystore"), tempEsConfigDir); copyFromContainer(installation.config("log4j2.properties"), tempEsConfigDir); - final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"tls_auto_config_*\""); + final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"certs\""); assertThat(autoConfigurationDir, notNullValue()); final String autoConfigurationDirName = autoConfigurationDir.getFileName().toString(); copyFromContainer(autoConfigurationDir, tempEsConfigDir.resolve(autoConfigurationDirName)); @@ -525,6 +538,9 @@ public void test072RunEsAsDifferentUserAndGroup() throws Exception { try { // Restart the container + // We need to set discovery to single-node as autoconfiguration has already run when the node started the first time + // cluster.initial_master_nodes is set to the name of the original docker container + ServerUtils.removeSettingFromExistingConfiguration(tempEsConfigDir, "cluster.initial_master_nodes"); runContainer( distribution(), builder().envVar("ELASTIC_PASSWORD", PASSWORD) @@ -532,6 +548,7 @@ public void test072RunEsAsDifferentUserAndGroup() throws Exception { .volume(tempEsDataDir.toAbsolutePath(), installation.data) .volume(tempEsConfigDir.toAbsolutePath(), installation.config) .volume(tempEsLogsDir.toAbsolutePath(), installation.logs) + .envVar("discovery.type", "single-node") ); 
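// The restarted node reuses the bind-mounted config (keystore, TLS material, elasticsearch.yml), so it should come up with the same auto-configured credentials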
waitForElasticsearch(installation, "elastic", PASSWORD); @@ -549,7 +566,12 @@ public void test072RunEsAsDifferentUserAndGroup() throws Exception { */ public void test073RunEsAsDifferentUserAndGroupWithoutBindMounting() { // Restart the container - runContainer(distribution(), builder().extraArgs("--group-add 0").uid(501, 501).envVar("ELASTIC_PASSWORD", PASSWORD)); + // We need to set discovery to single-node as autoconfiguration won't run, and we won't set + // cluster.initial_master_nodes + runContainer( + distribution(), + builder().extraArgs("--group-add 0").uid(501, 501).envVar("ELASTIC_PASSWORD", PASSWORD).envVar("discovery.type", "single-node") + ); waitForElasticsearch(installation, "elastic", PASSWORD); } @@ -649,7 +671,7 @@ public void test082CannotUseEnvVarsAndFiles() throws Exception { ); assertThat( - dockerLogs.stderr, + dockerLogs.stderr(), containsString("ERROR: Both ELASTIC_PASSWORD_FILE and ELASTIC_PASSWORD are set. These are mutually exclusive.") ); } @@ -673,7 +695,7 @@ public void test083EnvironmentVariablesUsingFilesHaveCorrectPermissions() throws ); assertThat( - dockerLogs.stderr, + dockerLogs.stderr(), containsString( "ERROR: File /run/secrets/" + passwordFilename + " from ELASTIC_PASSWORD_FILE must have file permissions 400 or 600" ) @@ -709,7 +731,7 @@ public void test084SymlinkToFileWithInvalidPermissionsIsRejected() throws Except ); assertThat( - dockerLogs.stderr, + dockerLogs.stderr(), containsString( "ERROR: File " + passwordFilename @@ -724,7 +746,7 @@ public void test084SymlinkToFileWithInvalidPermissionsIsRejected() throws Except * Check that environment variables are translated to -E options even for commands invoked under * `docker exec`, where the Docker image's entrypoint is not executed. */ - public void test085EnvironmentVariablesAreRespectedUnderDockerExec() throws Exception { + public void test085EnvironmentVariablesAreRespectedUnderDockerExec() { installation = runContainer(distribution(), builder().envVar("ELASTIC_PASSWORD", "hunter2")); // The tool below requires a keystore, so ensure that ES is fully initialised before proceeding. 
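// (the keystore only exists once the node has started for the first time and auto-created it)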
@@ -736,7 +758,7 @@ public void test085EnvironmentVariablesAreRespectedUnderDockerExec() throws Exce final Result result = sh.runIgnoreExitCode("bash -c 'echo y | elasticsearch-setup-passwords auto'"); assertFalse("elasticsearch-setup-passwords command should have failed", result.isSuccess()); - assertThat(result.stdout, containsString("java.net.UnknownHostException: this.is.not.valid")); + assertThat(result.stdout(), containsString("java.net.UnknownHostException: this.is.not.valid")); } /** @@ -752,7 +774,9 @@ public void test086EnvironmentVariablesInSnakeCaseAreTranslated() { // Note the double-underscore in the var name here, which retains the underscore in translation installation = runContainer(distribution(), builder().envVar("ES_SETTING_XPACK_SECURITY_FIPS__MODE_ENABLED", "false")); - final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'") + .stdout() + .lines() .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) .findFirst(); @@ -778,7 +802,9 @@ public void test087EnvironmentVariablesInIncorrectFormatAreIgnored() { .envVar("es_setting_xpack_security_fips__mode_enabled", "false") ); - final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'") + .stdout() + .lines() .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) .findFirst(); @@ -801,7 +827,9 @@ public void test088EnvironmentVariablesInDottedFormatArePassedThrough() { builder().envVar("xpack.security.fips_mode.enabled", "false").envVar("http.cors.allow-methods", "GET") ); - final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'") + .stdout() + .lines() .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) .findFirst(); @@ -825,12 +853,12 @@ public void test090SecurityCliPackaging() { assertTrue(existsInContainer(securityCli)); Result result = sh.run(bin.certutilTool + " --help"); - assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack")); + assertThat(result.stdout(), containsString("Simplifies certificate creation for use with the Elastic Stack")); // Ensure that the exit code from the java command is passed back up through the shell script result = sh.runIgnoreExitCode(bin.certutilTool + " invalid-command"); assertThat(result.isSuccess(), is(false)); - assertThat(result.stdout, containsString("Unknown command [invalid-command]")); + assertThat(result.stdout(), containsString("Unknown command [invalid-command]")); } /** @@ -840,7 +868,7 @@ public void test091ElasticsearchShardCliPackaging() { final Installation.Executables bin = installation.executables(); final Result result = sh.run(bin.shardTool + " -h"); - assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); + assertThat(result.stdout(), containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); } /** @@ -852,7 +880,7 @@ public void test092ElasticsearchNodeCliPackaging() { final Result result = sh.run(bin.nodeTool + " -h"); assertThat( "Failed to find expected message about the elasticsearch-node CLI tool", - result.stdout, + result.stdout(), containsString("A CLI tool to do unsafe cluster and index manipulations on current node") ); } @@ -877,7 +905,7 @@ public void test101AllFilesAreGroupZero() { 
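// Group 0 ownership is what allows the image to run under an arbitrary UID with GID 0, as OpenShift-like platforms do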
final Shell localSh = new Shell(); final String findResults = localSh.run( "docker run --rm --tty " + DockerRun.getImageName(distribution) + " bash -c ' touch data/test && find . \\! -group 0 ' " - ).stdout; + ).stdout(); assertThat("Found some files whose GID != 0", findResults, is(emptyString())); } @@ -958,8 +986,8 @@ public void test120DockerLogsIncludeElasticsearchLogs() { waitForElasticsearch(installation, "elastic", PASSWORD); final Result containerLogs = getContainerLogs(); - assertThat("Container logs should contain full class names", containerLogs.stdout, containsString("org.elasticsearch.node.Node")); - assertThat("Container logs don't contain INFO level messages", containerLogs.stdout, containsString("INFO")); + assertThat("Container logs should contain full class names", containerLogs.stdout(), containsString("org.elasticsearch.node.Node")); + assertThat("Container logs should contain INFO level messages", containerLogs.stdout(), containsString("INFO")); } /** @@ -971,7 +999,7 @@ public void test121CanUseStackLoggingConfig() { waitForElasticsearch(installation, "elastic", PASSWORD); final Result containerLogs = getContainerLogs(); - final List stdout = containerLogs.stdout.lines().collect(Collectors.toList()); + final List stdout = containerLogs.stdout().lines().toList(); // We check a line near the beginning so that we don't stumble upon the stdout printing of auto-configured credentials assertThat("Container logs should be formatted using the stack config", stdout.get(10), matchesPattern("^\\[\\d\\d\\d\\d-.*")); assertThat("[logs/docker-cluster.log] should exist but it doesn't", existsInContainer("logs/docker-cluster.log"), is(true)); @@ -986,7 +1014,7 @@ public void test122CanUseDockerLoggingConfig() { waitForElasticsearch(installation, "elastic", PASSWORD); final Result containerLogs = getContainerLogs(); - final List stdout = containerLogs.stdout.lines().collect(Collectors.toList()); + final List stdout = containerLogs.stdout().lines().toList(); // We check a line near the beginning so that we don't stumble upon the stdout printing of auto-configured credentials assertThat("Container logs should be formatted using the docker config", stdout.get(10), startsWith("{\"")); assertThat("[logs/docker-cluster.log] shouldn't exist but it does", existsInContainer("logs/docker-cluster.log"), is(false)); } /** */ public void test123CannotUseUnknownLoggingConfig() { final Result result = runContainerExpectingFailure(distribution(), builder().envVar("ES_LOG_STYLE", "unknown")); - assertThat(result.stderr, containsString("ERROR: ES_LOG_STYLE set to [unknown]. + assertThat(result.stderr(), containsString("ERROR: ES_LOG_STYLE set to [unknown]. 
Expected [console] or [file]")); } /** @@ -1021,11 +1049,11 @@ public void test124CanRestartContainerWithStackLoggingConfig() { public void test130JavaHasCorrectOwnership() { final ProcessInfo info = ProcessInfo.getProcessInfo(sh, "java"); - assertThat("Incorrect UID", info.uid, equalTo(1000)); - assertThat("Incorrect username", info.username, equalTo("elasticsearch")); + assertThat("Incorrect UID", info.uid(), equalTo(1000)); + assertThat("Incorrect username", info.username(), equalTo("elasticsearch")); - assertThat("Incorrect GID", info.gid, equalTo(0)); - assertThat("Incorrect group", info.group, equalTo("root")); + assertThat("Incorrect GID", info.gid(), equalTo(0)); + assertThat("Incorrect group", info.group(), equalTo("root")); } /** @@ -1035,13 +1063,13 @@ public void test130JavaHasCorrectOwnership() { public void test131InitProcessHasCorrectPID() { final ProcessInfo info = ProcessInfo.getProcessInfo(sh, "tini"); - assertThat("Incorrect PID", info.pid, equalTo(1)); + assertThat("Incorrect PID", info.pid(), equalTo(1)); - assertThat("Incorrect UID", info.uid, equalTo(1000)); - assertThat("Incorrect username", info.username, equalTo("elasticsearch")); + assertThat("Incorrect UID", info.uid(), equalTo(1000)); + assertThat("Incorrect username", info.username(), equalTo("elasticsearch")); - assertThat("Incorrect GID", info.gid, equalTo(0)); - assertThat("Incorrect group", info.group, equalTo("root")); + assertThat("Incorrect GID", info.gid(), equalTo(0)); + assertThat("Incorrect group", info.group(), equalTo("root")); } /** @@ -1111,7 +1139,8 @@ private List machineDependentHeapTest(final String containerMemory, fina // Grab the container output and find the line where it prints the JVM arguments. This will // let us see what the automatic heap sizing calculated. - final Optional jvmArgumentsLine = getContainerLogs().stdout.lines() + final Optional jvmArgumentsLine = getContainerLogs().stdout() + .lines() + .filter(line -> line.contains("JVM arguments")) + .findFirst(); assertThat("Failed to find jvmArguments in container logs", jvmArgumentsLine.isPresent(), is(true)); @@ -1137,18 +1166,31 @@ public void test160CheckImageHealthcheckDefinition() throws Exception { } } + /** + * Ensure that the default shell in the image is {@code bash}, since some alternatives, e.g. {@code dash}, + * are stricter about environment variable names. + */ + public void test170DefaultShellIsBash() { + final Result result = DockerShell.executeCommand("/bin/sh", "-c", "echo $SHELL"); + if (result.isSuccess()) { + assertThat(result.stdout(), equalTo("/bin/bash")); + } else { + throw new RuntimeException("Command failed: " + result.stderr()); + } + } + /** * Check that the UBI images have the correct license information in the correct place. 
*/ public void test200UbiImagesHaveLicenseDirectory() { assumeTrue(distribution.packaging == Packaging.DOCKER_UBI); - final String[] files = sh.run("find /licenses -type f").stdout.split("\n"); + final String[] files = sh.run("find /licenses -type f").stdout().split("\n"); assertThat(files, arrayContaining("/licenses/LICENSE")); // UBI image doesn't contain `diff` - final String ubiLicense = sh.run("cat /licenses/LICENSE").stdout; - final String distroLicense = sh.run("cat /usr/share/elasticsearch/LICENSE.txt").stdout; + final String ubiLicense = sh.run("cat /licenses/LICENSE").stdout(); + final String distroLicense = sh.run("cat /usr/share/elasticsearch/LICENSE.txt").stdout(); assertThat(ubiLicense, equalTo(distroLicense)); } @@ -1183,12 +1225,12 @@ public void test210UbiLabels() throws Exception { public void test300IronBankImagesHaveLicenseDirectory() { assumeTrue(distribution.packaging == Packaging.DOCKER_IRON_BANK); - final String[] files = sh.run("find /licenses -type f").stdout.split("\n"); + final String[] files = sh.run("find /licenses -type f").stdout().split("\n"); assertThat(files, arrayContaining("/licenses/LICENSE", "/licenses/LICENSE.addendum")); // Image doesn't contain `diff` - final String ubiLicense = sh.run("cat /licenses/LICENSE").stdout; - final String distroLicense = sh.run("cat /usr/share/elasticsearch/LICENSE.txt").stdout; + final String ubiLicense = sh.run("cat /licenses/LICENSE").stdout(); + final String distroLicense = sh.run("cat /usr/share/elasticsearch/LICENSE.txt").stdout(); assertThat(ubiLicense, equalTo(distroLicense)); } @@ -1228,6 +1270,6 @@ public void test400CloudImageBundlesBeats() { private List listPlugins() { final Installation.Executables bin = installation.executables(); - return sh.run(bin.pluginTool + " list").stdout.lines().collect(Collectors.toList()); + return sh.run(bin.pluginTool + " list").stdout().lines().collect(Collectors.toList()); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java index bac31a7dfeb5b..91e5051550e1f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java @@ -42,7 +42,7 @@ public void test20EnrollToClusterWithEmptyTokenValue() throws Exception { // something in our tests wraps the error code to 1 on Windows // TODO investigate this and remove this guard if (distribution.platform != Distribution.Platform.WINDOWS) { - assertThat(result.exitCode, equalTo(ExitCodes.USAGE)); + assertThat(result.exitCode(), equalTo(ExitCodes.USAGE)); } verifySecurityNotAutoConfigured(installation); } @@ -58,12 +58,28 @@ public void test30EnrollToClusterWithInvalidToken() throws Exception { // something in our tests wraps the error code to 1 on Windows // TODO investigate this and remove this guard if (distribution.platform != Distribution.Platform.WINDOWS) { - assertThat(result.exitCode, equalTo(ExitCodes.DATA_ERROR)); + assertThat(result.exitCode(), equalTo(ExitCodes.DATA_ERROR)); } verifySecurityNotAutoConfigured(installation); } - public void test40EnrollmentFailsForConfiguredNode() throws Exception { + public void test40EnrollToClusterWithInvalidAddress() throws Exception { + Shell.Result result = Archives.runElasticsearchStartCommand( + installation, + sh, + null, + List.of("--enrollment-token", generateMockEnrollmentToken()), + false + ); + // something in our tests wraps the 
error code to 1 on Windows + // TODO investigate this and remove this guard + if (distribution.platform != Distribution.Platform.WINDOWS) { + assertThat(result.exitCode(), equalTo(ExitCodes.UNAVAILABLE)); + } + verifySecurityNotAutoConfigured(installation); + } + + public void test50EnrollmentFailsForConfiguredNode() throws Exception { // auto-config requires that the archive owner and the process user be the same, Platforms.onWindows(() -> sh.chown(installation.config, installation.getOwner())); startElasticsearch(); @@ -79,12 +95,12 @@ public void test40EnrollmentFailsForConfiguredNode() throws Exception { // something in our tests wraps the error code to 1 on Windows // TODO investigate this and remove this guard if (distribution.platform != Distribution.Platform.WINDOWS) { - assertThat(result.exitCode, equalTo(ExitCodes.NOOP)); + assertThat(result.exitCode(), equalTo(ExitCodes.NOOP)); } Platforms.onWindows(() -> sh.chown(installation.config)); } - public void test50MultipleValuesForEnrollmentToken() throws Exception { + public void test60MultipleValuesForEnrollmentToken() throws Exception { // if invoked with --enrollment-token tokenA tokenB tokenC, only tokenA is read Shell.Result result = Archives.runElasticsearchStartCommand( installation, @@ -97,11 +113,11 @@ public void test50MultipleValuesForEnrollmentToken() throws Exception { // something in our tests wraps the error code to 1 on Windows // TODO investigate this and remove this guard if (distribution.platform != Distribution.Platform.WINDOWS) { - assertThat(result.exitCode, equalTo(ExitCodes.NOOP)); + assertThat(result.exitCode(), equalTo(ExitCodes.NOOP)); } } - public void test60MultipleParametersForEnrollmentTokenAreNotAllowed() throws Exception { + public void test70MultipleParametersForEnrollmentTokenAreNotAllowed() throws Exception { // if invoked with --enrollment-token tokenA --enrollment-token tokenB --enrollment-token tokenC, we exit Shell.Result result = Archives.runElasticsearchStartCommand( installation, @@ -117,8 +133,8 @@ public void test60MultipleParametersForEnrollmentTokenAreNotAllowed() throws Exc ), false ); - assertThat(result.stderr, containsString("Multiple --enrollment-token parameters are not allowed")); - assertThat(result.exitCode, equalTo(1)); + assertThat(result.stderr(), containsString("Multiple --enrollment-token parameters are not allowed")); + assertThat(result.exitCode(), equalTo(1)); } private String generateMockEnrollmentToken() throws Exception { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java index 0d361d4e6cec1..d21f8786e31e4 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java @@ -9,35 +9,36 @@ package org.elasticsearch.packaging.test; import org.elasticsearch.common.Strings; -import org.elasticsearch.packaging.test.PackagingTestCase.AwaitsFix; import org.elasticsearch.packaging.util.Archives; import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.Shell; -import org.junit.BeforeClass; +import org.elasticsearch.packaging.util.docker.Docker; import java.io.IOException; import java.net.InetAddress; import java.net.Socket; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.packaging.util.Archives.installArchive; import static 
org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion; +import static org.elasticsearch.packaging.util.docker.Docker.removeContainer; +import static org.elasticsearch.packaging.util.docker.Docker.runAdditionalContainer; +import static org.elasticsearch.packaging.util.docker.Docker.runContainer; +import static org.elasticsearch.packaging.util.docker.Docker.verifyContainerInstallation; +import static org.elasticsearch.packaging.util.docker.Docker.waitForElasticsearch; +import static org.elasticsearch.packaging.util.docker.DockerRun.builder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.junit.Assume.assumeTrue; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79810") public class EnrollmentProcessTests extends PackagingTestCase { - @BeforeClass - public static void filterDistros() { - assumeTrue("only archives", distribution.isArchive()); - } - - public void test10AutoFormCluster() throws Exception { + public void test10ArchiveAutoFormCluster() throws Exception { /* Windows issue awaits fix: https://github.com/elastic/elasticsearch/issues/49340 */ assumeTrue("expect command isn't on Windows", distribution.platform != Distribution.Platform.WINDOWS); + assumeTrue("only archives", distribution.isArchive()); installation = installArchive(sh, distribution(), getRootTempDir().resolve("elasticsearch-node1"), getCurrentVersion(), true); verifyArchiveInstallation(installation, distribution()); setFileSuperuser("test_superuser", "test_superuser_password"); @@ -50,10 +51,25 @@ public void test10AutoFormCluster() throws Exception { verifySecurityAutoConfigured(installation); // Generate a node enrollment token to be subsequently used by the second node Shell.Result createTokenResult = installation.executables().createEnrollmentToken.run("-s node"); - assertThat(Strings.isNullOrEmpty(createTokenResult.stdout), is(false)); - final String enrollmentToken = createTokenResult.stdout; + assertThat(Strings.isNullOrEmpty(createTokenResult.stdout()), is(false)); + final String enrollmentToken = createTokenResult.stdout(); // installation now points to the second node installation = installArchive(sh, distribution(), getRootTempDir().resolve("elasticsearch-node2"), getCurrentVersion(), true); + + // Try to start the node with an invalid enrollment token and verify it fails to start + Shell.Result startSecondNodeWithInvalidToken = Archives.startElasticsearchWithTty( + installation, + sh, + null, + List.of("--enrollment-token", "some-invalid-token-here"), + false + ); + assertThat( + startSecondNodeWithInvalidToken.stdout(), + containsString("Failed to parse enrollment token : some-invalid-token-here . 
Error was: Illegal base64 character 2d") ); + verifySecurityNotAutoConfigured(installation); + // auto-configure security using the enrollment token Shell.Result startSecondNode = awaitElasticsearchStartupWithResult( Archives.startElasticsearchWithTty(installation, sh, null, List.of("--enrollment-token", enrollmentToken), false) @@ -68,6 +84,46 @@ public void test10AutoFormCluster() throws Exception { assertThat(makeRequest("https://localhost:9200/_cluster/health"), containsString("\"number_of_nodes\":2")); } + public void test20DockerAutoFormCluster() throws Exception { + assumeTrue("only docker", distribution.isDocker()); + // First node + installation = runContainer(distribution(), builder().envVar("ELASTIC_PASSWORD", "password")); + verifyContainerInstallation(installation); + verifySecurityAutoConfigured(installation); + waitForElasticsearch(installation); + final String node1ContainerId = Docker.getContainerId(); + + final String enrollmentToken = installation.executables().createEnrollmentToken.run("-s node") + .stdout() + .lines() + .filter(line -> line.startsWith("WARNING:") == false) + .findFirst() + .orElseThrow(() -> new AssertionError("Failed to find any non-warning output lines")); + + // installation refers to second node from now on + installation = runAdditionalContainer(distribution(), builder().envVar("ENROLLMENT_TOKEN", enrollmentToken), 9201, 9301); + + // TODO Make our packaging test methods aware of multiple installations, see https://github.com/elastic/elasticsearch/issues/79688 + waitForElasticsearch(installation); + verifyContainerInstallation(installation); + verifySecurityAutoConfigured(installation); + + // Allow some time for the second node to join the cluster; we can probably do this more elegantly in + // https://github.com/elastic/elasticsearch/issues/79688 + // Then verify that the two nodes formed a cluster + assertBusy( + () -> assertThat( + makeRequestAsElastic("https://localhost:9200/_cluster/health", "password"), + containsString("\"number_of_nodes\":2") + ), + 20, + TimeUnit.SECONDS + ); + + // Clean up the first node that is still running + removeContainer(node1ContainerId); + } + private void waitForSecondNode() { int retries = 60; while (retries > 0) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java index bdac50050c5bd..9d9bb3f2f368a 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java @@ -73,7 +73,7 @@ public void test10InstallArchiveDistribution() throws Exception { final Installation.Executables bin = installation.executables(); Shell.Result r = sh.runIgnoreExitCode(bin.keystoreTool + " has-passwd"); assertFalse("has-passwd should fail", r.isSuccess()); - assertThat("has-passwd should indicate missing keystore", r.stderr, containsString(ERROR_KEYSTORE_NOT_FOUND)); + assertThat("has-passwd should indicate missing keystore", r.stderr(), containsString(ERROR_KEYSTORE_NOT_FOUND)); } /** Test initial package state */ @@ -89,9 +89,9 @@ public void test11InstallPackageDistribution() throws Exception { final Installation.Executables bin = installation.executables(); Shell.Result r = sh.runIgnoreExitCode(bin.keystoreTool + " has-passwd"); assertFalse("has-passwd should fail", r.isSuccess()); - assertThat("has-passwd should indicate unprotected keystore", r.stderr, 
containsString(ERROR_KEYSTORE_NOT_PASSWORD_PROTECTED)); + assertThat("has-passwd should indicate unprotected keystore", r.stderr(), containsString(ERROR_KEYSTORE_NOT_PASSWORD_PROTECTED)); Shell.Result r2 = bin.keystoreTool.run("list"); - assertThat(r2.stdout, containsString("keystore.seed")); + assertThat(r2.stdout(), containsString("keystore.seed")); } /** Test initial Docker state */ @@ -109,9 +109,9 @@ public void test12InstallDockerDistribution() throws Exception { final Installation.Executables bin = installation.executables(); Shell.Result r = sh.runIgnoreExitCode(bin.keystoreTool + " has-passwd"); assertFalse("has-passwd should fail", r.isSuccess()); - assertThat("has-passwd should indicate unprotected keystore", r.stdout, containsString(ERROR_KEYSTORE_NOT_PASSWORD_PROTECTED)); + assertThat("has-passwd should indicate unprotected keystore", r.stdout(), containsString(ERROR_KEYSTORE_NOT_PASSWORD_PROTECTED)); Shell.Result r2 = bin.keystoreTool.run("list"); - assertThat(r2.stdout, containsString("keystore.seed")); + assertThat(r2.stdout(), containsString("keystore.seed")); } public void test20KeystorePasswordOnStandardInput() throws Exception { @@ -160,7 +160,7 @@ public void test23WrongKeystorePasswordOnTty() throws Exception { Shell.Result result = runElasticsearchStartCommand("wrong", false, true); // error will be on stdout for "expect" - assertThat(result.stdout, anyOf(containsString(ERROR_INCORRECT_PASSWORD), containsString(ERROR_CORRUPTED_KEYSTORE))); + assertThat(result.stdout(), anyOf(containsString(ERROR_INCORRECT_PASSWORD), containsString(ERROR_CORRUPTED_KEYSTORE))); } /** @@ -172,7 +172,7 @@ public void test24EncryptedKeystoreAllowsHelpMessage() throws Exception { assertPasswordProtectedKeystore(); Shell.Result r = installation.executables().elasticsearch.run("--help"); - assertThat(r.stdout, startsWith("Starts Elasticsearch")); + assertThat(r.stdout(), startsWith("Starts Elasticsearch")); } public void test30KeystorePasswordFromFile() throws Exception { @@ -293,7 +293,7 @@ public void test42DockerEnvironmentVariableBadPassword() throws Exception { distribution(), builder().volume(localConfigPath.resolve("config"), installation.config).envVar("KEYSTORE_PASSWORD", "wrong") ); - assertThat(r.stderr, containsString(ERROR_INCORRECT_PASSWORD)); + assertThat(r.stderr(), containsString(ERROR_INCORRECT_PASSWORD)); } public void test50CreateKeystoreManually() throws Exception { @@ -305,7 +305,7 @@ public void test50CreateKeystoreManually() throws Exception { verifyKeystorePermissions(); Shell.Result r = bin.keystoreTool.run("list"); - assertThat(r.stdout, containsString("keystore.seed")); + assertThat(r.stdout(), containsString("keystore.seed")); } public void test60AutoCreateKeystore() throws Exception { @@ -325,7 +325,7 @@ public void test60AutoCreateKeystore() throws Exception { final Installation.Executables bin = installation.executables(); Shell.Result r = bin.keystoreTool.run("list"); - assertThat(r.stdout, containsString("keystore.seed")); + assertThat(r.stdout(), containsString("keystore.seed")); } /** @@ -427,29 +427,16 @@ private void setKeystorePassword(String password) throws Exception { private void assertPasswordProtectedKeystore() { Shell.Result r = installation.executables().keystoreTool.run("has-passwd"); - assertThat("keystore should be password protected", r.exitCode, is(0)); + assertThat("keystore should be password protected", r.exitCode(), is(0)); } private void verifyKeystorePermissions() { Path keystore = installation.config("elasticsearch.keystore"); switch 
(distribution.packaging) { - case TAR: - case ZIP: - assertThat(keystore, file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); - break; - case DEB: - case RPM: - assertThat(keystore, file(File, "root", "elasticsearch", p660)); - break; - case DOCKER: - case DOCKER_UBI: - case DOCKER_IRON_BANK: - case DOCKER_CLOUD: - case DOCKER_CLOUD_ESS: - assertThat(keystore, DockerFileMatcher.file(p660)); - break; - default: - throw new IllegalStateException("Unknown Elasticsearch packaging type."); + case TAR, ZIP -> assertThat(keystore, file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); + case DEB, RPM -> assertThat(keystore, file(File, "root", "elasticsearch", p660)); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS -> assertThat(keystore, DockerFileMatcher.file(p660)); + default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java index 62541d148c96c..5c38fa36a6640 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java @@ -63,19 +63,19 @@ public void test10InstallPackage() throws Exception { } public void test20PluginsCommandWhenNoPlugins() { - assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, is(emptyString())); + assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout(), is(emptyString())); } public void test30DaemonIsNotEnabledOnRestart() { if (isSystemd()) { sh.run("systemctl daemon-reload"); - String isEnabledOutput = sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").stdout.trim(); + String isEnabledOutput = sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").stdout().trim(); assertThat(isEnabledOutput, equalTo("disabled")); } } public void test31InstallDoesNotStartServer() { - assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); + assertThat(sh.run("ps aux").stdout(), not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); } private void assertRunsWithJavaHome() throws Exception { @@ -131,12 +131,12 @@ public void test34CustomJvmOptionsDirectoryFile() throws Exception { } public void test40StartServer() throws Exception { - String start = sh.runIgnoreExitCode("date ").stdout.trim(); + String start = sh.runIgnoreExitCode("date ").stdout().trim(); startElasticsearch(); String journalEntries = sh.runIgnoreExitCode( "journalctl _SYSTEMD_UNIT=elasticsearch.service " + "--since \"" + start + "\" --output cat | wc -l" - ).stdout.trim(); + ).stdout().trim(); assertThat(journalEntries, equalTo("0")); assertPathsExist(installation.pidDir.resolve("elasticsearch.pid")); @@ -166,7 +166,7 @@ public void test50Remove() throws Exception { remove(distribution()); // removing must stop the service - assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); + assertThat(sh.run("ps aux").stdout(), not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); if (isSystemd()) { @@ -186,15 +186,15 @@ public void test50Remove() throws Exception { } else { final Result versionResult = sh.run("systemctl --version"); - final Matcher matcher = Pattern.compile("^systemd (\\d+)").matcher(versionResult.stdout); + final Matcher matcher = Pattern.compile("^systemd (\\d+)").matcher(versionResult.stdout()); matcher.find(); final 
int version = Integer.parseInt(matcher.group(1)); statusExitCode = version < 231 ? 3 : 4; } - assertThat(sh.runIgnoreExitCode("systemctl status elasticsearch.service").exitCode, is(statusExitCode)); - assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode, is(1)); + assertThat(sh.runIgnoreExitCode("systemctl status elasticsearch.service").exitCode(), is(statusExitCode)); + assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode(), is(1)); } @@ -258,7 +258,7 @@ public void test71JvmOptionsTotalMemoryOverride() throws Exception { assertThat(nodesStatsResponse, containsString("\"adjusted_total_in_bytes\":891289600")); // 40% of 850MB - assertThat(sh.run("ps auwwx").stdout, containsString("-Xms340m -Xmx340m")); + assertThat(sh.run("ps auwwx").stdout(), containsString("-Xms340m -Xmx340m")); stopElasticsearch(); }); @@ -358,16 +358,16 @@ public void test84serviceFileSetsLimits() throws Exception { final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); assertThat(pidFile, fileExists()); String pid = slurp(pidFile).trim(); - String maxFileSize = sh.run("cat /proc/%s/limits | grep \"Max file size\" | awk '{ print $4 }'", pid).stdout.trim(); + String maxFileSize = sh.run("cat /proc/%s/limits | grep \"Max file size\" | awk '{ print $4 }'", pid).stdout().trim(); assertThat(maxFileSize, equalTo("unlimited")); - String maxProcesses = sh.run("cat /proc/%s/limits | grep \"Max processes\" | awk '{ print $3 }'", pid).stdout.trim(); + String maxProcesses = sh.run("cat /proc/%s/limits | grep \"Max processes\" | awk '{ print $3 }'", pid).stdout().trim(); assertThat(maxProcesses, equalTo("4096")); - String maxOpenFiles = sh.run("cat /proc/%s/limits | grep \"Max open files\" | awk '{ print $4 }'", pid).stdout.trim(); + String maxOpenFiles = sh.run("cat /proc/%s/limits | grep \"Max open files\" | awk '{ print $4 }'", pid).stdout().trim(); assertThat(maxOpenFiles, equalTo("65535")); - String maxAddressSpace = sh.run("cat /proc/%s/limits | grep \"Max address space\" | awk '{ print $4 }'", pid).stdout.trim(); + String maxAddressSpace = sh.run("cat /proc/%s/limits | grep \"Max address space\" | awk '{ print $4 }'", pid).stdout().trim(); assertThat(maxAddressSpace, equalTo("unlimited")); stopElasticsearch(); @@ -389,7 +389,7 @@ public void test90DoNotCloseStderrWhenQuiet() throws Exception { assertBusy(() -> { final Result logs = journald.getLogs(); - assertThat(logs.stdout, containsString("Failed to load settings from [elasticsearch.yml]")); + assertThat(logs.stdout(), containsString("Failed to load settings from [elasticsearch.yml]")); }); }); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java index 58215f9900309..7275e9d6ca283 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java @@ -10,9 +10,11 @@ import org.apache.http.client.fluent.Request; import org.apache.http.entity.ContentType; +import org.elasticsearch.Version; import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.Packages; import org.elasticsearch.packaging.util.ServerUtils; +import org.junit.BeforeClass; import java.nio.file.Paths; @@ -20,6 +22,7 @@ import static org.elasticsearch.packaging.util.Packages.installPackage; import static 
org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.hamcrest.Matchers.containsString; +import static org.junit.Assume.assumeTrue; public class PackageUpgradeTests extends PackagingTestCase { @@ -29,6 +32,12 @@ public class PackageUpgradeTests extends PackagingTestCase { bwcDistribution = new Distribution(Paths.get(System.getProperty("tests.bwc-distribution"))); } + @BeforeClass + public static void filterVersions() { + // TODO: Explicitly add testing for these versions that validates that starting the node after upgrade fails + assumeTrue("only wire compatible versions", Version.fromString(bwcDistribution.baseVersion).isCompatible(Version.CURRENT)); + } + public void test10InstallBwcVersion() throws Exception { installation = installPackage(sh, bwcDistribution); assertInstalled(bwcDistribution); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java index baafe77a8d483..b84dd871157c3 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java @@ -8,25 +8,47 @@ package org.elasticsearch.packaging.test; +import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ssl.PemKeyConfig; +import org.elasticsearch.packaging.util.FileMatcher; import org.elasticsearch.packaging.util.Installation; import org.elasticsearch.packaging.util.Packages; import org.elasticsearch.packaging.util.Shell; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.EnrollmentToken; +import org.hamcrest.CoreMatchers; import org.junit.BeforeClass; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardCopyOption; +import java.security.SecureRandom; import java.util.List; import java.util.Optional; import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; +import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory; +import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File; +import static org.elasticsearch.packaging.util.FileMatcher.p660; +import static org.elasticsearch.packaging.util.FileMatcher.p750; import static org.elasticsearch.packaging.util.FileUtils.append; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; import static org.elasticsearch.packaging.util.Packages.installPackage; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -110,7 +132,7 @@ public void test50ReconfigureAndEnroll() throws Exception { // We cannot run two packaged installations simultaneously here so that we can test that the second node 
enrolls successfully // We trigger with an invalid enrollment token, to verify that we removed the existing auto-configuration Shell.Result result = installation.executables().nodeReconfigureTool.run("--enrollment-token thisisinvalid", "y", true); - assertThat(result.exitCode, equalTo(ExitCodes.DATA_ERROR)); // invalid enrollment token + assertThat(result.exitCode(), equalTo(ExitCodes.DATA_ERROR)); // invalid enrollment token verifySecurityNotAutoConfigured(installation); } @@ -123,7 +145,7 @@ public void test60ReconfigureWithoutEnrollmentToken() throws Exception { verifySecurityAutoConfigured(installation); assertNotNull(installation.getElasticPassword()); Shell.Result result = installation.executables().nodeReconfigureTool.run("", null, true); - assertThat(result.exitCode, equalTo(ExitCodes.USAGE)); // missing enrollment token + assertThat(result.exitCode(), equalTo(ExitCodes.USAGE)); // missing enrollment token // we fail on command invocation so we don't even try to remove autoconfiguration verifySecurityAutoConfigured(installation); } @@ -143,7 +165,7 @@ public void test70ReconfigureFailsWhenTlsAutoConfDirMissing() throws Exception { // Move instead of delete because Files.deleteIfExists bails on non empty dirs Files.move(installation.config(autoConfigDirName.get()), installation.config("temp-autoconf-dir")); Shell.Result result = installation.executables().nodeReconfigureTool.run("--enrollment-token a-token", "y", true); - assertThat(result.exitCode, equalTo(ExitCodes.USAGE)); // + assertThat(result.exitCode(), equalTo(ExitCodes.USAGE)); // } public void test71ReconfigureFailsWhenKeyStorePasswordWrong() throws Exception { @@ -154,18 +176,15 @@ public void test71ReconfigureFailsWhenKeyStorePasswordWrong() throws Exception { verifyPackageInstallation(installation, distribution(), sh); verifySecurityAutoConfigured(installation); assertNotNull(installation.getElasticPassword()); - Shell.Result changePassword = installation.executables().keystoreTool.run( - "passwd", - "some-password" + "\n" + "some-password" + "\n" - ); - assertThat(changePassword.exitCode, equalTo(0)); + Shell.Result changePassword = installation.executables().keystoreTool.run("passwd", "some-password\nsome-password\n"); + assertThat(changePassword.exitCode(), equalTo(0)); Shell.Result result = installation.executables().nodeReconfigureTool.run( "--enrollment-token a-token", "y" + "\n" + "some-wrong-password", true ); - assertThat(result.exitCode, equalTo(ExitCodes.IO_ERROR)); // - assertThat(result.stderr, containsString("Error was: Provided keystore password was incorrect")); + assertThat(result.exitCode(), equalTo(ExitCodes.IO_ERROR)); // + assertThat(result.stderr(), containsString("Error was: Provided keystore password was incorrect")); } public void test71ReconfigureFailsWhenKeyStoreDoesNotContainExpectedSettings() throws Exception { @@ -179,11 +198,11 @@ public void test71ReconfigureFailsWhenKeyStoreDoesNotContainExpectedSettings() t Shell.Result removeSetting = installation.executables().keystoreTool.run( "remove xpack.security.transport.ssl.keystore.secure_password" ); - assertThat(removeSetting.exitCode, equalTo(0)); + assertThat(removeSetting.exitCode(), equalTo(0)); Shell.Result result = installation.executables().nodeReconfigureTool.run("--enrollment-token a-token", "y", true); - assertThat(result.exitCode, equalTo(ExitCodes.IO_ERROR)); + assertThat(result.exitCode(), equalTo(ExitCodes.IO_ERROR)); assertThat( - result.stderr, + result.stderr(), containsString( "elasticsearch.keystore did not contain 
expected setting [xpack.security.transport.ssl.keystore.secure_password]." ) @@ -203,8 +222,8 @@ public void test72ReconfigureFailsWhenConfigurationDoesNotContainSecurityAutoCon Files.write(yml, List.of(), TRUNCATE_EXISTING); Shell.Result result = installation.executables().nodeReconfigureTool.run("--enrollment-token a-token", "y", true); - assertThat(result.exitCode, equalTo(ExitCodes.USAGE)); // - assertThat(result.stderr, containsString("Expected configuration is missing from elasticsearch.yml.")); + assertThat(result.exitCode(), equalTo(ExitCodes.USAGE)); // + assertThat(result.stderr(), containsString("Expected configuration is missing from elasticsearch.yml.")); } public void test72ReconfigureRetainsUserSettings() throws Exception { @@ -220,7 +239,7 @@ public void test72ReconfigureRetainsUserSettings() throws Exception { List<String> allLines = Files.readAllLines(yml); // Replace a comment we know exists in the auto-configuration stanza, with a user-defined setting allLines.set( - allLines.indexOf("# All the nodes use the same key and certificate on the inter-node connection"), + allLines.indexOf("# Enable encryption for HTTP API client connections, such as Kibana, Logstash, and Agents"), "cluster.name: testclustername" ); allLines.add("node.name: testnodename"); @@ -229,7 +248,7 @@ // We cannot run two packaged installations simultaneously here so that we can test that the second node enrolls successfully // We trigger with an invalid enrollment token, to verify that we removed the existing auto-configuration Shell.Result result = installation.executables().nodeReconfigureTool.run("--enrollment-token thisisinvalid", "y", true); - assertThat(result.exitCode, equalTo(ExitCodes.DATA_ERROR)); // invalid enrollment token + assertThat(result.exitCode(), equalTo(ExitCodes.DATA_ERROR)); // invalid enrollment token verifySecurityNotAutoConfigured(installation); // Check that user configuration, both inside and outside the auto-configuration stanza, was retained Path editedYml = installation.config("elasticsearch.yml"); @@ -238,6 +257,69 @@ public void test72ReconfigureRetainsUserSettings() throws Exception { assertThat(newConfigurationLines, hasItem("node.name: testnodename")); } + public void test73ReconfigureCreatesFilesWithCorrectPermissions() throws Exception { + cleanup(); + assertRemoved(distribution()); + installation = installPackage(sh, distribution(), successfulAutoConfiguration()); + assertInstalled(distribution()); + verifyPackageInstallation(installation, distribution(), sh); + verifySecurityAutoConfigured(installation); + assertNotNull(installation.getElasticPassword()); + final PemKeyConfig keyConfig = new PemKeyConfig( + Paths.get(getClass().getResource("http.crt").toURI()).toAbsolutePath().normalize().toString(), + Paths.get(getClass().getResource("http.key").toURI()).toAbsolutePath().normalize().toString(), + new char[0], + Paths.get(getClass().getResource("http.crt").toURI()).getParent().toAbsolutePath().normalize() + ); + final SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(new KeyManager[] { keyConfig.createKeyManager() }, new TrustManager[] {}, new SecureRandom()); + // We can't run multiple nodes as package installations.
We mock an initial node that would respond to the enroll node API + try (MockWebServer mockNode = new MockWebServer(sslContext, false)) { + mockNode.start(); + final String httpCaCertPemString = Files.readAllLines( + Paths.get(getClass().getResource("http_ca.crt").toURI()).toAbsolutePath().normalize() + ).stream().filter(l -> l.contains("-----") == false).collect(Collectors.joining()); + final String httpCaKeyPemString = Files.readAllLines( + Paths.get(getClass().getResource("http_ca.key").toURI()).toAbsolutePath().normalize() + ).stream().filter(l -> l.contains("-----") == false).collect(Collectors.joining()); + final String transportCaCertPemString = Files.readAllLines( + Paths.get(getClass().getResource("transport_ca.crt").toURI()).toAbsolutePath().normalize() + ).stream().filter(l -> l.contains("-----") == false).collect(Collectors.joining()); + final String transportKeyPemString = Files.readAllLines( + Paths.get(getClass().getResource("transport.key").toURI()).toAbsolutePath().normalize() + ).stream().filter(l -> l.contains("-----") == false).collect(Collectors.joining()); + final String transportCertPemString = Files.readAllLines( + Paths.get(getClass().getResource("transport.crt").toURI()).toAbsolutePath().normalize() + ).stream().filter(l -> l.contains("-----") == false).collect(Collectors.joining()); + final XContentBuilder responseBuilder = jsonBuilder().startObject() + .field("http_ca_key", httpCaKeyPemString) + .field("http_ca_cert", httpCaCertPemString) + .field("transport_ca_cert", transportCaCertPemString) + .field("transport_key", transportKeyPemString) + .field("transport_cert", transportCertPemString) + .array("nodes_addresses", "192.168.1.23:9300") // won't be used, can be anything + .endObject(); + mockNode.enqueue(new MockResponse().setResponseCode(200).setBody(Strings.toString(responseBuilder))); + final EnrollmentToken enrollmentToken = new EnrollmentToken( + "some-api-key", + "b0150fd8a29f9012207912de9a01aa1d1f0dd696c847d3a9353881f9045bf442", // fingerprint of http_ca.crt + Version.CURRENT.toString(), + List.of(mockNode.getHostName() + ":" + mockNode.getPort()) + ); + Shell.Result result = installation.executables().nodeReconfigureTool.run( + "-v --enrollment-token " + enrollmentToken.getEncoded(), + "y", + true + ); + assertThat(result.exitCode(), CoreMatchers.equalTo(0)); + assertThat(installation.config("certs"), FileMatcher.file(Directory, "root", "elasticsearch", p750)); + Stream.of("http.p12", "http_ca.crt", "transport.p12") + .forEach( + file -> assertThat(installation.config("certs").resolve(file), FileMatcher.file(File, "root", "elasticsearch", p660)) + ); + } + } + private Predicate successfulAutoConfiguration() { Predicate p1 = output -> output.contains("Authentication and authorization are enabled."); Predicate p2 = output -> output.contains("TLS for the transport and HTTP layers is enabled and configured."); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index e2f2344e3f907..644219572e4e3 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -19,6 +19,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; 
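Note: the pervasive stdout -> stdout(), stderr -> stderr() and exitCode -> exitCode() renames across these test files track Shell.Result being converted from a class with public fields into a Java record, whose generated accessors are methods. A minimal sketch of the pattern (a standalone stand-in for the real org.elasticsearch.packaging.util.Shell.Result, shown only to illustrate the call-site change):

    // A record auto-generates the exitCode(), stdout() and stderr() accessors.
    public record Result(int exitCode, String stdout, String stderr) {
        public boolean isSuccess() {
            return exitCode == 0;
        }
    }

Call sites therefore move from field access (result.stdout.trim()) to accessor calls (result.stdout().trim()), with no behavioral change.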
import org.elasticsearch.core.Tuple; @@ -88,7 +89,6 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.junit.Assume.assumeFalse; @@ -126,12 +126,12 @@ public abstract class PackagingTestCase extends Assert { // the java installation already installed on the system protected static final String systemJavaHome; static { - Shell sh = new Shell(); + Shell initShell = new Shell(); if (Platforms.WINDOWS) { - systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + systemJavaHome = initShell.run("$Env:SYSTEM_JAVA_HOME").stdout().trim(); } else { assert Platforms.LINUX || Platforms.DARWIN; - systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + systemJavaHome = initShell.run("echo $SYSTEM_JAVA_HOME").stdout().trim(); } } @@ -232,26 +232,19 @@ protected static Distribution distribution() { protected static void install() throws Exception { switch (distribution.packaging) { - case TAR: - case ZIP: + case TAR, ZIP -> { installation = Archives.installArchive(sh, distribution); Archives.verifyArchiveInstallation(installation, distribution); - break; - case DEB: - case RPM: + } + case DEB, RPM -> { installation = Packages.installPackage(sh, distribution); Packages.verifyPackageInstallation(installation, distribution, sh); - break; - case DOCKER: - case DOCKER_UBI: - case DOCKER_IRON_BANK: - case DOCKER_CLOUD: - case DOCKER_CLOUD_ESS: + } + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS -> { installation = Docker.runContainer(distribution); Docker.verifyContainerInstallation(installation); - break; - default: - throw new IllegalStateException("Unknown Elasticsearch packaging type."); + } + default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } // the purpose of the packaging tests is not to test auto heap in every case, so we explicitly set heap size to 1g @@ -362,25 +355,12 @@ public void stopElasticsearch() throws Exception { } public void awaitElasticsearchStartup(Shell.Result result) throws Exception { - assertThat("Startup command should succeed. Stderr: [" + result + "]", result.exitCode, equalTo(0)); + assertThat("Startup command should succeed. 
Stderr: [" + result + "]", result.exitCode(), equalTo(0)); switch (distribution.packaging) { - case TAR: - case ZIP: - Archives.assertElasticsearchStarted(installation); - break; - case DEB: - case RPM: - Packages.assertElasticsearchStarted(sh, installation); - break; - case DOCKER: - case DOCKER_UBI: - case DOCKER_IRON_BANK: - case DOCKER_CLOUD: - case DOCKER_CLOUD_ESS: - Docker.waitForElasticsearchToStart(); - break; - default: - throw new IllegalStateException("Unknown Elasticsearch packaging type."); + case TAR, ZIP -> Archives.assertElasticsearchStarted(installation); + case DEB, RPM -> Packages.assertElasticsearchStarted(sh, installation); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS -> Docker.waitForElasticsearchToStart(); + default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } @@ -405,7 +385,7 @@ public void startElasticsearch() throws Exception { if (Files.exists(installation.home.resolve("elasticsearch.pid"))) { String pid = FileUtils.slurp(installation.home.resolve("elasticsearch.pid")).trim(); logger.info("elasticsearch process ({}) failed to start", pid); - if (sh.run("jps").stdout.contains(pid)) { + if (sh.run("jps").stdout().contains(pid)) { logger.info("Dumping jstack of elasticsearch process ({}) ", pid); sh.runIgnoreExitCode("jstack " + pid); } @@ -433,15 +413,15 @@ public void assertElasticsearchFailure(Shell.Result result, List expecte } else if (distribution().isPackage() && Platforms.isSystemd()) { // For systemd, retrieve the error from journalctl - assertThat(result.stderr, containsString("Job for elasticsearch.service failed")); + assertThat(result.stderr(), containsString("Job for elasticsearch.service failed")); Shell.Result error = journaldWrapper.getLogs(); - assertThat(error.stdout, anyOf(stringMatchers)); + assertThat(error.stdout(), anyOf(stringMatchers)); } else if (Platforms.WINDOWS && Files.exists(Archives.getPowershellErrorPath(installation))) { // In Windows, we have written our stdout and stderr to files in order to run // in the background - String wrapperPid = result.stdout.trim(); + String wrapperPid = result.stdout().trim(); sh.runIgnoreExitCode("Wait-Process -Timeout " + Archives.ES_STARTUP_SLEEP_TIME_SECONDS + " -Id " + wrapperPid); sh.runIgnoreExitCode( "Get-EventSubscriber | " @@ -453,7 +433,7 @@ public void assertElasticsearchFailure(Shell.Result result, List expecte } else { // Otherwise, error should be on shell stderr - assertThat(result.stderr, anyOf(stringMatchers)); + assertThat(result.stderr(), anyOf(stringMatchers)); } } @@ -651,17 +631,17 @@ public static void assertBusy(CheckedRunnable codeBlock, long maxWait public void verifySecurityAutoConfigured(Installation es) throws Exception { Optional autoConfigDirName = getAutoConfigDirName(es); assertThat(autoConfigDirName.isPresent(), Matchers.is(true)); - final List configLines; + final Settings settings; if (es.distribution.isArchive()) { // We chown the installation on Windows to Administrators so that we can auto-configure it. String owner = Platforms.WINDOWS ? 
"BUILTIN\\Administrators" : "elasticsearch"; assertThat(es.config(autoConfigDirName.get()), FileMatcher.file(Directory, owner, owner, p750)); - Stream.of("http_keystore_local_node.p12", "http_ca.crt", "transport_keystore_all_nodes.p12") + Stream.of("http.p12", "http_ca.crt", "transport.p12") .forEach(file -> assertThat(es.config(autoConfigDirName.get()).resolve(file), FileMatcher.file(File, owner, owner, p660))); - configLines = Files.readAllLines(es.config("elasticsearch.yml")); + settings = Settings.builder().loadFromPath(es.config("elasticsearch.yml")).build(); } else if (es.distribution.isDocker()) { assertThat(es.config(autoConfigDirName.get()), DockerFileMatcher.file(Directory, "elasticsearch", "root", p750)); - Stream.of("http_keystore_local_node.p12", "http_ca.crt", "transport_keystore_all_nodes.p12") + Stream.of("http.p12", "http_ca.crt", "transport.p12") .forEach( file -> assertThat( es.config(autoConfigDirName.get()).resolve(file), @@ -670,48 +650,34 @@ public void verifySecurityAutoConfigured(Installation es) throws Exception { ); Path localTempDir = createTempDir("docker-config"); copyFromContainer(es.config("elasticsearch.yml"), localTempDir.resolve("docker_elasticsearch.yml")); - configLines = Files.readAllLines(localTempDir.resolve("docker_elasticsearch.yml")); + settings = Settings.builder().loadFromPath(localTempDir.resolve("docker_elasticsearch.yml")).build(); rm(localTempDir.resolve("docker_elasticsearch.yml")); rm(localTempDir); } else { assert es.distribution.isPackage(); assertThat(es.config(autoConfigDirName.get()), FileMatcher.file(Directory, "root", "elasticsearch", p750)); - Stream.of("http_keystore_local_node.p12", "http_ca.crt", "transport_keystore_all_nodes.p12") + Stream.of("http.p12", "http_ca.crt", "transport.p12") .forEach( file -> assertThat( es.config(autoConfigDirName.get()).resolve(file), FileMatcher.file(File, "root", "elasticsearch", p660) ) ); - assertThat(sh.run(es.executables().keystoreTool + " list").stdout, Matchers.containsString("autoconfiguration.password_hash")); - configLines = Files.readAllLines(es.config("elasticsearch.yml")); + assertThat( + sh.run(es.executables().keystoreTool + " list").stdout(), + Matchers.containsString("autoconfiguration.password_hash") + ); + settings = Settings.builder().loadFromPath(es.config("elasticsearch.yml")).build(); } - assertThat(configLines, hasItem("xpack.security.enabled: true")); - assertThat(configLines, hasItem("xpack.security.http.ssl.enabled: true")); - assertThat(configLines, hasItem("xpack.security.transport.ssl.enabled: true")); + assertThat(settings.get("xpack.security.enabled"), equalTo("true")); + assertThat(settings.get("xpack.security.enrollment.enabled"), equalTo("true")); + assertThat(settings.get("xpack.security.transport.ssl.enabled"), equalTo("true")); + assertThat(settings.get("xpack.security.transport.ssl.verification_mode"), equalTo("certificate")); + assertThat(settings.get("xpack.security.http.ssl.enabled"), equalTo("true")); + assertThat(settings.get("xpack.security.enabled"), equalTo("true")); - assertThat(configLines, hasItem("xpack.security.enrollment.enabled: true")); - assertThat(configLines, hasItem("xpack.security.transport.ssl.verification_mode: certificate")); - assertThat( - configLines, - hasItem( - "xpack.security.transport.ssl.keystore.path: " - + es.config(autoConfigDirName.get()).resolve("transport_keystore_all_nodes.p12") - ) - ); - assertThat( - configLines, - hasItem( - "xpack.security.transport.ssl.truststore.path: " - + 
es.config(autoConfigDirName.get()).resolve("transport_keystore_all_nodes.p12") - ) - ); - assertThat( - configLines, - hasItem("xpack.security.http.ssl.keystore.path: " + es.config(autoConfigDirName.get()).resolve("http_keystore_local_node.p12")) - ); if (es.distribution.isDocker() == false) { - assertThat(configLines, hasItem("http.host: [_local_, _site_]")); + assertThat(settings.get("http.host"), equalTo("[_local_, _site_]")); } } @@ -725,16 +691,19 @@ public static void verifySecurityNotAutoConfigured(Installation es) throws Excep if (es.distribution.isPackage()) { if (Files.exists(es.config("elasticsearch.keystore"))) { assertThat( - sh.run(es.executables().keystoreTool + " list").stdout, + sh.run(es.executables().keystoreTool + " list").stdout(), not(Matchers.containsString("autoconfiguration.password_hash")) ); } } List configLines = Files.readAllLines(es.config("elasticsearch.yml")); - assertThat(configLines, not(contains(containsString("automatically generated in order to configure Security")))); + assertThat( + configLines, + not(contains(containsString("#----------------------- BEGIN SECURITY AUTO CONFIGURATION -----------------------"))) + ); Path caCert = ServerUtils.getCaCert(installation); if (caCert != null) { - assertThat(caCert.toString(), Matchers.not(Matchers.containsString("tls_auto_config"))); + assertThat(caCert.toString(), Matchers.not(Matchers.containsString("certs"))); } } @@ -745,8 +714,8 @@ public static Optional getAutoConfigDirName(Installation es) { } else { lsResult = sh.run("find \"" + es.config + "\" -type d -maxdepth 1"); } - assertNotNull(lsResult.stdout); - return Arrays.stream(lsResult.stdout.split("\n")).filter(f -> f.contains("tls_auto_config_")).findFirst(); + assertNotNull(lsResult.stdout()); + return Arrays.stream(lsResult.stdout().split("\n")).filter(f -> f.contains("certs")).findFirst(); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java index e52f0146ecc8f..623cf34ce8a46 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java @@ -47,7 +47,7 @@ public void test20GeneratePasswords() throws Exception { assertWhileRunning(() -> { ServerUtils.waitForElasticsearch(installation); Shell.Result result = installation.executables().setupPasswordsTool.run("auto --batch", null); - Map userpasses = parseUsersAndPasswords(result.stdout); + Map userpasses = parseUsersAndPasswords(result.stdout()); for (Map.Entry userpass : userpasses.entrySet()) { String response = ServerUtils.makeRequest( Request.Get("http://localhost:9200"), @@ -115,7 +115,7 @@ public void test40GeneratePasswordsBootstrapAlreadySet() throws Exception { assertWhileRunning(() -> { Shell.Result result = installation.executables().setupPasswordsTool.run("auto --batch", null); - Map userpasses = parseUsersAndPasswords(result.stdout); + Map userpasses = parseUsersAndPasswords(result.stdout()); assertThat(userpasses, hasKey("elastic")); for (Map.Entry userpass : userpasses.entrySet()) { String response = ServerUtils.makeRequest( diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PluginCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PluginCliTests.java index 104ac3dde95ac..ff67733a76339 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PluginCliTests.java +++ 
b/qa/os/src/test/java/org/elasticsearch/packaging/test/PluginCliTests.java @@ -19,10 +19,13 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.List; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; @@ -109,7 +112,7 @@ public void test23ElasticsearchWithSpace() throws Exception { public void test24JavaOpts() throws Exception { sh.getEnv().put("ES_JAVA_OPTS", "-XX:+PrintFlagsFinal"); - assertWithExamplePlugin(installResult -> assertThat(installResult.stdout, containsString("MaxHeapSize"))); + assertWithExamplePlugin(installResult -> assertThat(installResult.stdout(), containsString("MaxHeapSize"))); } public void test25Umask() throws Exception { @@ -126,7 +129,7 @@ public void test30InstallFailsIfConfigFilePresent() throws IOException { Shell.Result result = installation.executables().pluginTool.run("install analysis-icu", null, true); assertThat(result.isSuccess(), is(false)); - assertThat(result.stderr, containsString("Plugins config [" + pluginsConfig + "] exists")); + assertThat(result.stderr(), containsString("Plugins config [" + pluginsConfig + "] exists")); } /** @@ -138,7 +141,7 @@ public void test31RemoveFailsIfConfigFilePresent() throws IOException { Shell.Result result = installation.executables().pluginTool.run("install analysis-icu", null, true); assertThat(result.isSuccess(), is(false)); - assertThat(result.stderr, containsString("Plugins config [" + pluginsConfig + "] exists")); + assertThat(result.stderr(), containsString("Plugins config [" + pluginsConfig + "] exists")); } /** @@ -158,4 +161,40 @@ public void test32FailsToStartWhenPluginsConfigExists() throws Exception { FileUtils.rm(installation.config("elasticsearch-plugins.yml")); } } + + /** + * Check that attempting to install a plugin that has been promoted to a module + * succeeds, but does nothing. + */ + public void test40InstallOfModularizedPluginsSucceedsButDoesNothing() { + for (String pluginId : List.of("repository-azure", "repository-gcs", "repository-s3")) { + String stderr = installation.executables().pluginTool.run("install " + pluginId).stderr(); + assertThat( + "Expected plugin installer to warn about migrated plugins", + stderr, + containsString("[" + pluginId + "] is no longer a plugin") + ); + + String pluginList = installation.executables().pluginTool.run("list").stdout(); + assertThat(pluginId + " should not appear in the plugin list", pluginList, not(containsString(pluginId))); + } + } + + /** + * Check that attempting to remove a plugin that has been promoted to a module + * succeeds, but does nothing. 
+ */ + public void test41RemovalOfModularizedPluginsSucceedsButDoesNothing() { + String pluginList = installation.executables().pluginTool.run("list").stdout(); + assertThat("Expected no plugins to be installed", pluginList.trim(), is(emptyString())); + + for (String pluginId : List.of("repository-azure", "repository-gcs", "repository-s3")) { + String stderr = installation.executables().pluginTool.run("remove " + pluginId).stderr(); + assertThat( + "Expected plugin installer to warn about migrated plugins", + stderr, + containsString("[" + pluginId + "] is no longer a plugin") + ); + } + } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java index cd46d62f09f8c..b09a185e50ce7 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java @@ -35,11 +35,11 @@ public void test11Dependencies() { final Shell.Result deps = sh.run("rpm -qpR " + getDistributionFile(distribution())); - TestCase.assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(deps.stdout).find()); + TestCase.assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(deps.stdout()).find()); final Shell.Result conflicts = sh.run("rpm -qp --conflicts " + getDistributionFile(distribution())); String oppositePackageName = "elasticsearch-oss"; - TestCase.assertTrue(Pattern.compile("(?m)^" + oppositePackageName + "\\s*$").matcher(conflicts.stdout).find()); + TestCase.assertTrue(Pattern.compile("(?m)^" + oppositePackageName + "\\s*$").matcher(conflicts.stdout()).find()); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java index f81a2062b53e2..19a9d9b74048e 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java @@ -78,7 +78,7 @@ public void test30PreserveConfig() throws Exception { assertRemoved(distribution()); if (isSystemd()) { - assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode, is(1)); + assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode(), is(1)); } assertPathsDoNotExist( diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/SqlCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/SqlCliTests.java index 0180d37803058..b01f525a04fb5 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/SqlCliTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/SqlCliTests.java @@ -20,6 +20,6 @@ public void test010Install() throws Exception { public void test020Help() throws Exception { Shell.Result result = installation.executables().sqlCli.run("--help"); - assertThat(result.stdout, containsString("Elasticsearch SQL CLI")); + assertThat(result.stdout(), containsString("Elasticsearch SQL CLI")); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/TemporaryDirectoryConfigTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/TemporaryDirectoryConfigTests.java index 541458d201005..b6c898320cd0b 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/TemporaryDirectoryConfigTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/TemporaryDirectoryConfigTests.java @@ -103,7 +103,7 @@ public void 
test31VerifiesCustomPathInDocker() throws Exception { distribution(), DockerRun.builder().volume(tmpDir, tmpDir).envVar("LIBFFI_TMPDIR", tmpFile.toString()) ); - assertThat(result.stderr, containsString("LIBFFI_TMPDIR")); + assertThat(result.stderr(), containsString("LIBFFI_TMPDIR")); } private void withLibffiTmpdir(String tmpDir, CheckedConsumer action) throws Exception { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java index 4877274222b91..1d61174318a13 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java @@ -50,9 +50,9 @@ public void uninstallService() { private void assertService(String id, String status, String displayName) { Result result = sh.run("Get-Service " + id + " | Format-List -Property Name, Status, DisplayName"); - assertThat(result.stdout, containsString("Name : " + id)); - assertThat(result.stdout, containsString("Status : " + status)); - assertThat(result.stdout, containsString("DisplayName : " + displayName)); + assertThat(result.stdout(), containsString("Name : " + id)); + assertThat(result.stdout(), containsString("Status : " + status)); + assertThat(result.stdout(), containsString("DisplayName : " + displayName)); } // runs the service command, dumping all log files on failure @@ -69,8 +69,8 @@ private Result assertFailure(String script, int exitCode) { } private void assertExit(Result result, String script, int exitCode) { - if (result.exitCode != exitCode) { - logger.error("---- Unexpected exit code (expected " + exitCode + ", got " + result.exitCode + ") for script: " + script); + if (result.exitCode() != exitCode) { + logger.error("---- Unexpected exit code (expected " + exitCode + ", got " + result.exitCode() + ") for script: " + script); logger.error(result); logger.error("Dumping log files\n"); Result logs = sh.run( @@ -83,10 +83,10 @@ private void assertExit(Result result, String script, int exitCode) { + " Get-Content \"$file\" " + "}" ); - logger.error(logs.stdout); + logger.error(logs.stdout()); fail(); } else { - logger.info("\nscript: " + script + "\nstdout: " + result.stdout + "\nstderr: " + result.stderr); + logger.info("\nscript: " + script + "\nstdout: " + result.stdout() + "\nstderr: " + result.stderr()); } } @@ -102,8 +102,8 @@ public void test11InstallServiceExeMissing() throws IOException { Path tmpServiceExe = serviceExe.getParent().resolve(serviceExe.getFileName() + ".tmp"); Files.move(serviceExe, tmpServiceExe); Result result = sh.runIgnoreExitCode(serviceScript + " install"); - assertThat(result.exitCode, equalTo(1)); - assertThat(result.stdout, containsString("elasticsearch-service-x64.exe was not found...")); + assertThat(result.exitCode(), equalTo(1)); + assertThat(result.stdout(), containsString("elasticsearch-service-x64.exe was not found...")); Files.move(tmpServiceExe, serviceExe); } @@ -119,8 +119,8 @@ public void test13InstallMissingBundledJdk() throws IOException { try { mv(installation.bundledJdk, relocatedJdk); Result result = sh.runIgnoreExitCode(serviceScript + " install"); - assertThat(result.exitCode, equalTo(1)); - assertThat(result.stderr, containsString("could not find java in bundled JDK")); + assertThat(result.exitCode(), equalTo(1)); + assertThat(result.stderr(), containsString("could not find java in bundled JDK")); } finally { mv(relocatedJdk, installation.bundledJdk); } @@ 
-129,13 +129,13 @@ public void test13InstallMissingBundledJdk() throws IOException { public void test14InstallBadJavaHome() throws IOException { sh.getEnv().put("ES_JAVA_HOME", "doesnotexist"); Result result = sh.runIgnoreExitCode(serviceScript + " install"); - assertThat(result.exitCode, equalTo(1)); - assertThat(result.stderr, containsString("could not find java in ES_JAVA_HOME")); + assertThat(result.exitCode(), equalTo(1)); + assertThat(result.stderr(), containsString("could not find java in ES_JAVA_HOME")); } public void test15RemoveNotInstalled() { Result result = assertFailure(serviceScript + " remove", 1); - assertThat(result.stdout, containsString("Failed removing '" + DEFAULT_ID + "' service")); + assertThat(result.stdout(), containsString("Failed removing '" + DEFAULT_ID + "' service")); } public void test16InstallSpecialCharactersInJdkPath() throws IOException { @@ -146,7 +146,7 @@ public void test16InstallSpecialCharactersInJdkPath() throws IOException { try { mv(installation.bundledJdk, relocatedJdk); Result result = sh.run(serviceScript + " install"); - assertThat(result.stdout, containsString("The service 'elasticsearch-service-x64' has been installed.")); + assertThat(result.stdout(), containsString("The service 'elasticsearch-service-x64' has been installed.")); } finally { sh.runIgnoreExitCode(serviceScript + " remove"); mv(relocatedJdk, installation.bundledJdk); @@ -214,14 +214,14 @@ public void test30StartStop() throws Exception { public void test31StartNotInstalled() throws IOException { Result result = sh.runIgnoreExitCode(serviceScript + " start"); - assertThat(result.stdout, result.exitCode, equalTo(1)); - assertThat(result.stdout, containsString("Failed starting '" + DEFAULT_ID + "' service")); + assertThat(result.stdout(), result.exitCode(), equalTo(1)); + assertThat(result.stdout(), containsString("Failed starting '" + DEFAULT_ID + "' service")); } public void test32StopNotStarted() throws IOException { sh.run(serviceScript + " install"); Result result = sh.run(serviceScript + " stop"); // stop is ok when not started - assertThat(result.stdout, containsString("The service '" + DEFAULT_ID + "' has been stopped")); + assertThat(result.stdout(), containsString("The service '" + DEFAULT_ID + "' has been stopped")); } public void test33JavaChanged() throws Exception { @@ -247,20 +247,20 @@ public void test60Manager() throws IOException { Files.write(fakeServiceMgr, Arrays.asList("echo \"Fake Service Manager GUI\"")); Shell sh = new Shell(); Result result = sh.run(serviceScript + " manager"); - assertThat(result.stdout, containsString("Fake Service Manager GUI")); + assertThat(result.stdout(), containsString("Fake Service Manager GUI")); // check failure too Files.write(fakeServiceMgr, Arrays.asList("echo \"Fake Service Manager GUI Failure\"", "exit 1")); result = sh.runIgnoreExitCode(serviceScript + " manager"); - TestCase.assertEquals(1, result.exitCode); - TestCase.assertTrue(result.stdout, result.stdout.contains("Fake Service Manager GUI Failure")); + TestCase.assertEquals(1, result.exitCode()); + TestCase.assertTrue(result.stdout(), result.stdout().contains("Fake Service Manager GUI Failure")); Files.move(tmpServiceMgr, serviceMgr); } public void test70UnknownCommand() { Result result = sh.runIgnoreExitCode(serviceScript + " bogus"); - assertThat(result.exitCode, equalTo(1)); - assertThat(result.stdout, containsString("Unknown option \"bogus\"")); + assertThat(result.exitCode(), equalTo(1)); + assertThat(result.stdout(), containsString("Unknown option 
\"bogus\"")); } public void test80JavaOptsInEnvVar() throws Exception { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 48c978cad62a0..6600514c67f9e 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -43,12 +43,8 @@ public Distribution(Path path) { this.platform = filename.contains("windows") ? Platform.WINDOWS : Platform.LINUX; this.hasJdk = filename.contains("no-jdk") == false; - String version = filename.split("-", 3)[1]; - this.baseVersion = version; - if (filename.contains("-SNAPSHOT")) { - version += "-SNAPSHOT"; - } - this.version = version; + this.baseVersion = filename.split("-", 3)[1]; + this.version = filename.contains("-SNAPSHOT") ? this.baseVersion + "-SNAPSHOT" : this.baseVersion; } public boolean isArchive() { @@ -63,15 +59,10 @@ public boolean isPackage() { * @return whether this distribution is packaged as a Docker image. */ public boolean isDocker() { - switch (packaging) { - case DOCKER: - case DOCKER_UBI: - case DOCKER_IRON_BANK: - case DOCKER_CLOUD: - case DOCKER_CLOUD_ESS: - return true; - } - return false; + return switch (packaging) { + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS -> true; + default -> false; + }; } public enum Packaging { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java index 521846f7ed7fb..d1568bcf08a4f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java @@ -36,6 +36,7 @@ public class Installation { public final Path envFile; @Nullable private String elasticPassword; // auto-configured password upon installation + public final int port; private Installation( Shell sh, @@ -47,7 +48,8 @@ private Installation( Path plugins, Path modules, Path pidDir, - Path envFile + Path envFile, + int port ) { this.sh = sh; this.distribution = distribution; @@ -62,6 +64,7 @@ private Installation( this.modules = modules; this.pidDir = pidDir; this.envFile = envFile; + this.port = port; this.elasticPassword = null; } @@ -76,7 +79,8 @@ public static Installation ofArchive(Shell sh, Distribution distribution, Path h home.resolve("plugins"), home.resolve("modules"), null, - null + null, + 9200 ); } @@ -96,11 +100,12 @@ public static Installation ofPackage(Shell sh, Distribution distribution) { Paths.get("/usr/share/elasticsearch/plugins"), Paths.get("/usr/share/elasticsearch/modules"), Paths.get("/var/run/elasticsearch"), - envFile + envFile, + 9200 ); } - public static Installation ofContainer(Shell sh, Distribution distribution) { + public static Installation ofContainer(Shell sh, Distribution distribution, int port) { String root = "/usr/share/elasticsearch"; return new Installation( sh, @@ -112,10 +117,15 @@ public static Installation ofContainer(Shell sh, Distribution distribution) { Paths.get(root + "/plugins"), Paths.get(root + "/modules"), null, - null + null, + port ); } + public static Installation ofContainer(Shell sh, Distribution distribution) { + return ofContainer(sh, distribution, 9200); + } + /** * Returns the user that owns this installation. 
* diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index e9cbf24034305..6d9cba73aa8f1 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -51,23 +51,23 @@ public class Packages { public static void assertInstalled(Distribution distribution) throws Exception { final Result status = packageStatus(distribution); - assertThat(status.exitCode, is(0)); + assertThat(status.exitCode(), is(0)); - Platforms.onDPKG(() -> assertFalse(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find())); + Platforms.onDPKG(() -> assertFalse(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout()).find())); } public static void assertRemoved(Distribution distribution) throws Exception { final Result status = packageStatus(distribution); - Platforms.onRPM(() -> assertThat(status.exitCode, is(1))); + Platforms.onRPM(() -> assertThat(status.exitCode(), is(1))); Platforms.onDPKG(() -> { - assertThat(status.exitCode, anyOf(is(0), is(1))); - if (status.exitCode == 0) { + assertThat(status.exitCode(), anyOf(is(0), is(1))); + if (status.exitCode() == 0) { assertTrue( - "an uninstalled status should be indicated: " + status.stdout, - Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find() - || Pattern.compile("(?m)^Status:.+ok not-installed").matcher(status.stdout).find() + "an uninstalled status should be indicated: " + status.stdout(), + Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout()).find() + || Pattern.compile("(?m)^Status:.+ok not-installed").matcher(status.stdout()).find() ); } }); @@ -84,16 +84,16 @@ public static Installation installPackage(Shell sh, Distribution distribution) t public static Installation installPackage(Shell sh, Distribution distribution, @Nullable Predicate outputPredicate) throws IOException { - String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout().trim(); if (distribution.hasJdk == false) { sh.getEnv().put("ES_JAVA_HOME", systemJavaHome); } final Result result = runPackageManager(distribution, sh, PackageManagerCommand.INSTALL); - if (result.exitCode != 0) { + if (result.exitCode() != 0) { throw new RuntimeException("Installing distribution " + distribution + " failed: " + result); } if (null != outputPredicate) { - assertThat(outputPredicate.test(result.stdout), is(true)); + assertThat(outputPredicate.test(result.stdout()), is(true)); } Installation installation = Installation.ofPackage(sh, distribution); installation.setElasticPassword(captureElasticPasswordFromOutput(result)); @@ -109,7 +109,7 @@ public static Installation installPackage(Shell sh, Distribution distribution, @ } private static String captureElasticPasswordFromOutput(Result result) { - return Arrays.stream(result.stdout.split(System.lineSeparator())) + return Arrays.stream(result.stdout().split(System.lineSeparator())) .filter(l -> l.contains("The generated password for the elastic built-in superuser is : ")) .map(l -> l.substring(63, 83)) .findFirst() @@ -118,7 +118,7 @@ private static String captureElasticPasswordFromOutput(Result result) { public static Installation upgradePackage(Shell sh, Distribution distribution) throws IOException { final Result result = runPackageManager(distribution, sh, PackageManagerCommand.UPGRADE); - if (result.exitCode != 0) { + if 
(result.exitCode() != 0) { throw new RuntimeException("Upgrading distribution " + distribution + " failed: " + result); } @@ -127,7 +127,7 @@ public static Installation upgradePackage(Shell sh, Distribution distribution) t public static Installation forceUpgradePackage(Shell sh, Distribution distribution) throws IOException { final Result result = runPackageManager(distribution, sh, PackageManagerCommand.FORCE_UPGRADE); - if (result.exitCode != 0) { + if (result.exitCode() != 0) { throw new RuntimeException("Force upgrading distribution " + distribution + " failed: " + result); } @@ -145,9 +145,9 @@ private static Result runPackageManager(Distribution distribution, Shell sh, Pac } else { String debOptions = DEB_OPTIONS.get(command); Result r = sh.runIgnoreExitCode("dpkg " + debOptions + " " + distributionArg); - if (r.exitCode != 0) { + if (r.exitCode() != 0) { Result lockOF = sh.runIgnoreExitCode("lsof /var/lib/dpkg/lock"); - if (lockOF.exitCode == 0) { + if (lockOF.exitCode() == 0) { throw new RuntimeException("dpkg failed and the lockfile still exists. " + "Failure:\n" + r + "\nLockfile:\n" + lockOF); } } @@ -162,13 +162,13 @@ public static void remove(Distribution distribution) throws Exception { Platforms.onRPM(() -> { final Result status = packageStatus(distribution); - assertThat(status.exitCode, is(1)); + assertThat(status.exitCode(), is(1)); }); Platforms.onDPKG(() -> { final Result status = packageStatus(distribution); - assertThat(status.exitCode, is(0)); - assertTrue(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find()); + assertThat(status.exitCode(), is(0)); + assertTrue(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout()).find()); }); } @@ -183,7 +183,7 @@ private static void verifyOssInstallation(Installation es, Distribution distribu sh.run("getent group elasticsearch"); final Result passwdResult = sh.run("getent passwd elasticsearch"); - final Path homeDir = Paths.get(passwdResult.stdout.trim().split(":")[5]); + final Path homeDir = Paths.get(passwdResult.stdout().trim().split(":")[5]); assertThat("elasticsearch user home directory must not exist", homeDir, fileDoesNotExist()); Stream.of(es.home, es.plugins, es.modules).forEach(dir -> assertThat(dir, file(Directory, "root", "root", p755))); @@ -192,19 +192,19 @@ private static void verifyOssInstallation(Installation es, Distribution distribu // we shell out here because java's posix file permission view doesn't support special modes assertThat(es.config, file(Directory, "root", "elasticsearch", p750)); - assertThat(sh.run("find \"" + es.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + es.config + "\" -maxdepth 0 -printf \"%m\"").stdout(), containsString("2750")); // We introduced the jvm.options.d folder in 7.7 if (Version.fromString(distribution.baseVersion).onOrAfter(Version.V_7_7_0)) { final Path jvmOptionsDirectory = es.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "elasticsearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout(), containsString("2750")); } Stream.of("elasticsearch.keystore", "elasticsearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(es.config(configFile), file(File, "root", "elasticsearch", p660))); - assertThat(sh.run("sudo -u elasticsearch " + 
es.bin("elasticsearch-keystore") + " list").stdout, containsString("keystore.seed")); + assertThat(sh.run("sudo -u elasticsearch " + es.bin("elasticsearch-keystore") + " list").stdout(), containsString("keystore.seed")); Stream.of(es.bin, es.lib).forEach(dir -> assertThat(dir, file(Directory, "root", "root", p755))); @@ -218,7 +218,7 @@ private static void verifyOssInstallation(Installation es, Distribution distribu if (distribution.packaging == Distribution.Packaging.RPM) { assertThat(es.home.resolve("LICENSE.txt"), file(File, "root", "root", p644)); } else { - Path copyrightDir = Paths.get(sh.run("readlink -f /usr/share/doc/elasticsearch").stdout.trim()); + Path copyrightDir = Paths.get(sh.run("readlink -f /usr/share/doc/elasticsearch").stdout().trim()); assertThat(copyrightDir, file(Directory, "root", "root", p755)); assertThat(copyrightDir.resolve("copyright"), file(File, "root", "root", p644)); } @@ -231,7 +231,7 @@ private static void verifyOssInstallation(Installation es, Distribution distribu ).forEach(confFile -> assertThat(confFile, file(File, "root", "root", p644))); final String sysctlExecutable = (distribution.packaging == Distribution.Packaging.RPM) ? "/usr/sbin/sysctl" : "/sbin/sysctl"; - assertThat(sh.run(sysctlExecutable + " vm.max_map_count").stdout, containsString("vm.max_map_count = 262144")); + assertThat(sh.run(sysctlExecutable + " vm.max_map_count").stdout(), containsString("vm.max_map_count = 262144")); } } @@ -271,8 +271,8 @@ public static Shell.Result runElasticsearchStartCommand(Shell sh) { sh.run("systemctl is-enabled elasticsearch.service"); Result exitCode = sh.runIgnoreExitCode("systemctl start elasticsearch.service"); if (exitCode.isSuccess() == false) { - logger.warn(sh.runIgnoreExitCode("systemctl status elasticsearch.service").stdout); - logger.warn(journald.getLogs().stdout); + logger.warn(sh.runIgnoreExitCode("systemctl status elasticsearch.service").stdout()); + logger.warn(journald.getLogs().stdout()); } return exitCode; } @@ -332,7 +332,7 @@ public JournaldWrapper(Shell sh) { */ public void clear() { final String script = "sudo journalctl --unit=elasticsearch.service --lines=0 --show-cursor -o cat | sed -e 's/-- cursor: //'"; - cursor = sh.run(script).stdout.trim(); + cursor = sh.run(script).stdout().trim(); } /** diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/ProcessInfo.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/ProcessInfo.java index 1b928a447e3c8..91fd00d86e2b2 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/ProcessInfo.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/ProcessInfo.java @@ -22,27 +22,15 @@ * works in Linux containers. At the moment that isn't a problem, because we only publish Docker images * for Linux. */ -public class ProcessInfo { - public final int pid; - public final int uid; - public final int gid; - public final String username; - public final String group; - - public ProcessInfo(int pid, int uid, int gid, String username, String group) { - this.pid = pid; - this.uid = uid; - this.gid = gid; - this.username = username; - this.group = group; - } +public record ProcessInfo(int pid, int uid, int gid, String username, String group) { /** * Fetches process information about command, using sh to execute commands. 
+ * * @return a populated ProcessInfo object */ public static ProcessInfo getProcessInfo(Shell sh, String command) { - final List processes = sh.run("pgrep " + command).stdout.lines().collect(Collectors.toList()); + final List processes = sh.run("pgrep " + command).stdout().lines().collect(Collectors.toList()); assertThat("Expected a single process", processes, hasSize(1)); @@ -52,7 +40,7 @@ public static ProcessInfo getProcessInfo(Shell sh, String command) { int uid = -1; int gid = -1; - for (String line : sh.run("cat /proc/" + pid + "/status | grep '^[UG]id:'").stdout.split("\\n")) { + for (String line : sh.run("cat /proc/" + pid + "/status | grep '^[UG]id:'").stdout().split("\\n")) { final String[] fields = line.split("\\s+"); if (fields[0].equals("Uid:")) { @@ -62,8 +50,8 @@ public static ProcessInfo getProcessInfo(Shell sh, String command) { } } - final String username = sh.run("getent passwd " + uid + " | cut -f1 -d:").stdout.trim(); - final String group = sh.run("getent group " + gid + " | cut -f1 -d:").stdout.trim(); + final String username = sh.run("getent passwd " + uid + " | cut -f1 -d:").stdout().trim(); + final String group = sh.run("getent group " + gid + " | cut -f1 -d:").stdout().trim(); return new ProcessInfo(pid, uid, gid, username, group); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java index dccf58ff23c9c..04c0f305ce2e5 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java @@ -24,6 +24,7 @@ import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.packaging.test.PackagingTestCase; import java.io.IOException; @@ -76,7 +77,9 @@ public static void waitForElasticsearch(Installation installation) throws Except String configFile = Files.readString(configFilePath, StandardCharsets.UTF_8); securityEnabled = configFile.contains(SECURITY_DISABLED) == false; } else { - final Optional commandLine = dockerShell.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + final Optional commandLine = dockerShell.run("bash -c 'COLUMNS=2000 ps ax'") + .stdout() + .lines() .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) .findFirst(); if (commandLine.isPresent() == false) { @@ -150,7 +153,7 @@ private static void waitForXpack(Installation installation) { int retries = 60; while (retries > 0) { retries -= 1; - try (Socket s = new Socket(InetAddress.getLoopbackAddress(), 9200)) { + try (Socket s = new Socket(InetAddress.getLoopbackAddress(), installation.port)) { return; } catch (IOException e) { // ignore, only want to establish a connection @@ -173,7 +176,7 @@ private static void waitForXpack(Installation installation) { public static Path getCaCert(Installation installation) throws IOException { if (installation.distribution.isDocker()) { final Path tempDir = PackagingTestCase.createTempDir("docker-ssl"); - final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"tls_auto_config_*\""); + final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"certs\""); if (autoConfigurationDir != null) { final Path hostHttpCaCert = tempDir.resolve("http_ca.crt"); copyFromContainer(autoConfigurationDir.resolve("http_ca.crt"), hostHttpCaCert); @@ -193,13 +196,15 @@ public static Path 
getCaCert(Path configPath) throws IOException { Path configFilePath = configPath.resolve("elasticsearch.yml"); if (Files.exists(configFilePath)) { // In docker we might not even have a file, and if we do it's not in the host's FS - String configFile = Files.readString(configFilePath, StandardCharsets.UTF_8); - enrollmentEnabled = configFile.contains("xpack.security.enrollment.enabled: true"); - httpSslEnabled = configFile.contains("xpack.security.http.ssl.enabled: true"); + Settings settings = Settings.builder().loadFromPath(configFilePath).build(); + enrollmentEnabled = settings.hasValue("xpack.security.enrollment.enabled") + && settings.get("xpack.security.enrollment.enabled").equals("true"); + httpSslEnabled = settings.hasValue("xpack.security.http.ssl.enabled") + && settings.get("xpack.security.http.ssl.enabled").equals("true"); } if (enrollmentEnabled && httpSslEnabled) { assert Files.exists(caCert) == false; - List<Path> allAutoconfTLS = FileUtils.lsGlob(configPath, "tls_auto_config_*"); + List<Path> allAutoconfTLS = FileUtils.lsGlob(configPath, "certs*"); assertThat(allAutoconfTLS.size(), is(1)); Path autoconfTLSDir = allAutoconfTLS.get(0); caCert = autoconfTLSDir.resolve("http_ca.crt"); @@ -366,7 +371,15 @@ public static int makeRequestAndGetStatus(Request request, String username, Stri } public static void disableGeoIpDownloader(Installation installation) throws IOException { - addSettingToExistingConfiguration(installation, "ingest.geoip.downloader.enabled", "false"); + // We don't use addSettingToExistingConfiguration because it would overwrite comments in the settings file + // and we might want to check for them later on to test auto-configuration + Path yml = installation.config("elasticsearch.yml"); + List<String> lines; + try (Stream<String> allLines = Files.readAllLines(yml).stream()) { + lines = allLines.filter(s -> s.startsWith("ingest.geoip.downloader.enabled") == false).collect(Collectors.toList()); + } + lines.add("ingest.geoip.downloader.enabled: false"); + Files.write(yml, lines, TRUNCATE_EXISTING); } public static void enableGeoIpDownloader(Installation installation) throws IOException { @@ -377,21 +390,19 @@ public static void enableGeoIpDownloader(Installation installation) throws IOExc * Explicitly disables security features */ public static void disableSecurityFeatures(Installation installation) throws IOException { - List<String> disabledSecurityFeatures = List.of( - "xpack.security.http.ssl.enabled: false", - "xpack.security.transport.ssl.enabled: false", - "xpack.security.enabled: false" - ); Path yamlFile = installation.config("elasticsearch.yml"); - List<String> lines; - try (Stream<String> allLines = Files.readAllLines(yamlFile).stream()) { - lines = allLines.filter(l -> l.startsWith("xpack.security.http.ssl") == false) - .filter(l -> l.startsWith("xpack.security.transport.ssl") == false) - .filter(l -> l.startsWith("xpack.security.enabled:") == false) - .collect(Collectors.toList()); - } - lines.addAll(disabledSecurityFeatures); - Files.write(yamlFile, lines, TRUNCATE_EXISTING); + final Settings settings = Settings.builder().loadFromPath(yamlFile).build(); + final Settings newSettings = Settings.builder() + .put(settings.filter(k -> k.startsWith("xpack.security") == false)) + .put("xpack.security.http.ssl.enabled", false) + .put("xpack.security.transport.ssl.enabled", false) + .put("xpack.security.enabled", false) + .build(); + Files.write( + yamlFile, + newSettings.keySet().stream().map(k -> k + ": " + newSettings.get(k)).collect(Collectors.toList()), + TRUNCATE_EXISTING + ); } @@ -408,22 +419,33 @@
public static void enableSecurityAutoConfiguration(Installation installation) th } public static void addSettingToExistingConfiguration(Installation installation, String setting, String value) throws IOException { - Path yml = installation.config("elasticsearch.yml"); - List<String> lines; - try (Stream<String> allLines = Files.readAllLines(yml).stream()) { - lines = allLines.filter(s -> s.startsWith(setting) == false).collect(Collectors.toList()); - } - lines.add(setting + ": " + value); - Files.write(yml, lines, TRUNCATE_EXISTING); + addSettingToExistingConfiguration(installation.config, setting, value); } public static void removeSettingFromExistingConfiguration(Installation installation, String setting) throws IOException { - Path yml = installation.config("elasticsearch.yml"); - List<String> lines; - try (Stream<String> allLines = Files.readAllLines(yml).stream()) { - lines = allLines.filter(s -> s.startsWith(setting) == false).collect(Collectors.toList()); - } - Files.write(yml, lines, TRUNCATE_EXISTING); + removeSettingFromExistingConfiguration(installation.config, setting); + } + + public static void addSettingToExistingConfiguration(Path confPath, String setting, String value) throws IOException { + Path yml = confPath.resolve("elasticsearch.yml"); + final Settings settings = Settings.builder().loadFromPath(yml).build(); + final Settings newSettings = Settings.builder().put(settings).put(setting, value).build(); + Files.write( + yml, + newSettings.keySet().stream().map(k -> k + ": " + newSettings.get(k)).collect(Collectors.toList()), + TRUNCATE_EXISTING + ); + } + + public static void removeSettingFromExistingConfiguration(Path confPath, String setting) throws IOException { + Path yml = confPath.resolve("elasticsearch.yml"); + final Settings settings = Settings.builder().loadFromPath(yml).build(); + final Settings newSettings = Settings.builder().put(settings.filter(k -> k.equals(setting) == false)).build(); + Files.write( + yml, + newSettings.keySet().stream().map(k -> k + ": " + newSettings.get(k)).collect(Collectors.toList()), + TRUNCATE_EXISTING + ); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java index 6802ac54fdacb..de3cc90ad50c6 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java @@ -87,28 +87,19 @@ public void chown(Path path) throws Exception { public void chown(Path path, String newOwner) throws Exception { logger.info("Chowning " + path + " to " + newOwner); Platforms.onLinux(() -> run("chown -R elasticsearch:elasticsearch " + path)); - Platforms.onWindows( - () -> run( - String.format( - Locale.ROOT, - "$account = New-Object System.Security.Principal.NTAccount '%s'; " - + "$pathInfo = Get-Item '%s'; " - + "$toChown = @(); " - + "if ($pathInfo.PSIsContainer) { " - + " $toChown += Get-ChildItem '%s' -Recurse; " - + "}" - + "$toChown += $pathInfo; " - + "$toChown | ForEach-Object { " - + " $acl = Get-Acl $_.FullName; " - + " $acl.SetOwner($account); " - + " Set-Acl $_.FullName $acl " - + "}", - newOwner, - path, - path - ) - ) - ); + Platforms.onWindows(() -> run(String.format(Locale.ROOT, """ + $account = New-Object System.Security.Principal.NTAccount '%s'; + $pathInfo = Get-Item '%s'; + $toChown = @(); + if ($pathInfo.PSIsContainer) { + $toChown += Get-ChildItem '%s' -Recurse; + } + $toChown += $pathInfo; + $toChown | ForEach-Object { + $acl = Get-Acl $_.FullName; + $acl.SetOwner($account); + Set-Acl
$_.FullName $acl + }""", newOwner, path, path))); } public void extractZip(Path zipPath, Path destinationDir) throws Exception { @@ -237,21 +228,13 @@ public String toString() { return String.format(Locale.ROOT, " env = [%s] workingDirectory = [%s]", env, workingDirectory); } - public static class Result { - public final int exitCode; - public final String stdout; - public final String stderr; - - public Result(int exitCode, String stdout, String stderr) { - this.exitCode = exitCode; - this.stdout = stdout; - this.stderr = stderr; - } + public record Result(int exitCode, String stdout, String stderr) { public boolean isSuccess() { return exitCode == 0; } + @Override public String toString() { return String.format(Locale.ROOT, "exitCode = [%d] stdout = [%s] stderr = [%s]", exitCode, stdout.trim(), stderr.trim()); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index 96bfa1f82cb41..c6e90859324ca 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -83,7 +83,7 @@ public class Docker { * @param distribution details about the docker image to potentially load. */ public static void ensureImageIsLoaded(Distribution distribution) { - final long count = sh.run("docker image ls --format '{{.Repository}}' " + getImageName(distribution)).stdout.lines().count(); + final long count = sh.run("docker image ls --format '{{.Repository}}' " + getImageName(distribution)).stdout().lines().count(); if (count != 0) { return; @@ -120,6 +120,27 @@ public static Installation runContainer(Distribution distribution, DockerRun bui return Installation.ofContainer(dockerShell, distribution); } + /** + * Runs an Elasticsearch Docker container without removing any existing containers first, + * and checks that it has started up successfully. + * + * @param distribution details about the docker image being tested + * @param builder the command to run + * @param restPort the port to expose the REST endpoint on + * @param transportPort the port to expose the transport endpoint on + * @return an installation that models the running container + */ + public static Installation runAdditionalContainer(Distribution distribution, DockerRun builder, int restPort, int transportPort) { + // TODO Maybe revisit this as part of https://github.com/elastic/elasticsearch/issues/79688 + final String command = builder.distribution(distribution) + .extraArgs("--publish", transportPort + ":9300", "--publish", restPort + ":9200") + .build(); + logger.info("Running command: " + command); + containerId = sh.run(command).stdout().trim(); + waitForElasticsearchToStart(); + return Installation.ofContainer(dockerShell, distribution, restPort); + } + + /** + * Similar to {@link #runContainer(Distribution, DockerRun)} in that it runs an Elasticsearch Docker + * container, except that we expect the container to exit, e.g. due to a configuration problem.
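Most of the churn in this patch falls out of one change, visible in the Shell.java hunk above: Shell.Result (like ProcessInfo earlier) moves from a class with public final fields to a record, so every call site switches from field access (result.stdout) to the generated accessor (result.stdout()). A minimal, standalone sketch of that mechanic, with illustrative values that are not taken from the tests:

```java
// Standalone sketch of the class-to-record conversion; OldResult mirrors the
// deleted field-based class, Result mirrors the new record.
public class RecordAccessorDemo {

    // Before: callers read the public field directly, e.g. result.stdout
    static final class OldResult {
        public final int exitCode;
        public final String stdout;

        OldResult(int exitCode, String stdout) {
            this.exitCode = exitCode;
            this.stdout = stdout;
        }
    }

    // After: the record header generates private final fields plus exitCode()
    // and stdout() accessors, along with equals/hashCode/toString.
    record Result(int exitCode, String stdout) {
        boolean isSuccess() {
            return exitCode == 0;
        }
    }

    public static void main(String[] args) {
        Result result = new Result(0, "keystore.seed\n");
        // result.stdout would no longer compile against the record; the accessor does:
        System.out.println(result.isSuccess() + " -> " + result.stdout().trim());
    }
}
```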
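The ServerUtils hunks above also swap line-oriented string matching of elasticsearch.yml for parsing through Settings.builder().loadFromPath(...). A sketch of that round-trip, with the caveat the patch's own comment states: serialising Settings back out as flat "key: value" lines discards comments and ordering, which is exactly why disableGeoIpDownloader keeps the old line-filtering approach. The helper name upsertSetting is invented for illustration:

```java
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Collectors;

public class YmlRewriteSketch {

    // Parse the YAML into Settings, apply one override, and rewrite the file.
    // Files.write defaults to CREATE + TRUNCATE_EXISTING, matching the explicit
    // TRUNCATE_EXISTING option used in the patch.
    static void upsertSetting(Path yml, String setting, String value) throws IOException {
        Settings current = Settings.builder().loadFromPath(yml).build();
        Settings updated = Settings.builder().put(current).put(setting, value).build();
        Files.write(yml, updated.keySet().stream().map(k -> k + ": " + updated.get(k)).collect(Collectors.toList()));
    }
}
```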
@@ -142,7 +163,7 @@ private static void executeDockerRun(Distribution distribution, DockerRun builde final String command = builder.distribution(distribution).build(); logger.info("Running command: " + command); - containerId = sh.run(command).stdout.trim(); + containerId = sh.run(command).stdout().trim(); } /** @@ -161,7 +182,7 @@ public static void waitForElasticsearchToStart() { Thread.sleep(STARTUP_SLEEP_INTERVAL_MILLISECONDS); // Set COLUMNS so that `ps` doesn't truncate its output - psOutput = dockerShell.run("bash -c 'COLUMNS=2000 ps ax'").stdout; + psOutput = dockerShell.run("bash -c 'COLUMNS=2000 ps ax'").stdout(); if (psOutput.contains("org.elasticsearch.bootstrap.Elasticsearch")) { isElasticsearchRunning = true; @@ -174,14 +195,18 @@ if (isElasticsearchRunning == false) { final Shell.Result dockerLogs = getContainerLogs(); - fail( - "Elasticsearch container did not start successfully.\n\nps output:\n" - + psOutput - + "\n\nStdout:\n" - + dockerLogs.stdout - + "\n\nStderr:\n" - + dockerLogs.stderr - ); + fail(""" + Elasticsearch container did not start successfully. + + ps output: + %s + + Stdout: + %s + + Stderr: + %s\ + """.formatted(psOutput, dockerLogs.stdout(), dockerLogs.stderr())); } } @@ -197,7 +222,7 @@ private static void waitForElasticsearchToExit() { // Give the container a chance to exit out Thread.sleep(2000); - if (sh.run("docker ps --quiet --no-trunc").stdout.contains(containerId) == false) { + if (sh.run("docker ps --quiet --no-trunc").stdout().contains(containerId) == false) { isElasticsearchRunning = false; break; } @@ -208,36 +233,52 @@ if (isElasticsearchRunning) { final Shell.Result dockerLogs = getContainerLogs(); - fail("Elasticsearch container didn't exit.\n\nStdout:\n" + dockerLogs.stdout + "\n\nStderr:\n" + dockerLogs.stderr); + fail(""" + Elasticsearch container didn't exit. + + Stdout: + %s + + Stderr: + %s\ + """.formatted(dockerLogs.stdout(), dockerLogs.stderr())); } } /** - * Removes the currently running container. + * Removes the container with a given id */ - public static void removeContainer() { + public static void removeContainer(String containerId) { if (containerId != null) { - try { - // Remove the container, forcibly killing it if necessary - logger.debug("Removing container " + containerId); - final String command = "docker rm -f " + containerId; - final Shell.Result result = sh.runIgnoreExitCode(command); - - if (result.isSuccess() == false) { - boolean isErrorAcceptable = result.stderr.contains("removal of container " + containerId + " is already in progress") - || result.stderr.contains("Error: No such container: " + containerId); - - // I'm not sure why we're already removing this container, but that's OK. + // Remove the container, forcibly killing it if necessary + logger.debug("Removing container " + containerId); + final String command = "docker rm -f " + containerId; + final Shell.Result result = sh.runIgnoreExitCode(command); + + if (result.isSuccess() == false) { + boolean isErrorAcceptable = result.stderr().contains("removal of container " + containerId + " is already in progress") + || result.stderr().contains("Error: No such container: " + containerId); + + // I'm not sure why we're already removing this container, but that's OK.
+ if (isErrorAcceptable == false) { + throw new RuntimeException("Command was not successful: [" + command + "] result: " + result); } - } finally { - // Null out the containerId under all circumstances, so that even if the remove command fails - // for some reason, the other tests will still proceed. Otherwise they can get stuck, continually - // trying to remove a non-existent container ID. - containerId = null; } + + } + } + + /** + * Removes the currently running container. + */ + public static void removeContainer() { + try { + removeContainer(containerId); + } finally { + // Null out the containerId under all circumstances, so that even if the remove command fails + // for some reason, the other tests will still proceed. Otherwise they can get stuck, continually + // trying to remove a non-existent container ID. + containerId = null; } } @@ -286,8 +327,8 @@ public static Path findInContainer(Path base, String type, String pattern) throw logger.debug("Trying to look for " + pattern + " ( " + type + ") in " + base + " in the container"); final String script = "docker exec " + containerId + " find " + base + " -type " + type + " -iname " + pattern; final Shell.Result result = sh.run(script); - if (result.isSuccess() && Strings.isNullOrEmpty(result.stdout) == false) { - String path = result.stdout; + if (result.isSuccess() && Strings.isNullOrEmpty(result.stdout()) == false) { + String path = result.stdout(); if (path.split(System.lineSeparator()).length > 1) { path = path.split(System.lineSeparator())[1]; } @@ -413,7 +454,7 @@ public static void verifyContainerInstallation(Installation es) { dockerShell.run("getent group elasticsearch"); final Shell.Result passwdResult = dockerShell.run("getent passwd elasticsearch"); - final String homeDir = passwdResult.stdout.trim().split(":")[5]; + final String homeDir = passwdResult.stdout().trim().split(":")[5]; assertThat("elasticsearch user's home directory is incorrect", homeDir, equalTo("/usr/share/elasticsearch")); assertThat(es.home, file(Directory, "root", "root", p775)); @@ -426,7 +467,7 @@ public static void verifyContainerInstallation(Installation es) { Stream.of(es.bin, es.config, es.logs, es.config.resolve("jvm.options.d"), es.data, es.plugins) .forEach(dir -> assertThat(dir, file(Directory, "elasticsearch", "root", p775))); - final String arch = dockerShell.run("arch").stdout.trim(); + final String arch = dockerShell.run("arch").stdout().trim(); Stream.of(es.bin, es.bundledJdk.resolve("bin"), es.modules.resolve("x-pack-ml/platform/linux-" + arch + "/bin")) .forEach( @@ -444,7 +485,7 @@ public static void verifyContainerInstallation(Installation es) { Stream.of("LICENSE.txt", "NOTICE.txt", "README.asciidoc") .forEach(doc -> assertThat(es.home.resolve(doc), file("root", "root", p444))); - assertThat(dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout, containsString("keystore.seed")); + assertThat(dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout(), containsString("keystore.seed")); // nc is useful for checking network issues // zip/unzip are installed to help users who are working with certificates. 
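The removeContainer refactor above is worth spelling out: the new String-accepting overload does the actual docker rm -f and may throw, while the no-arg variant wraps it in try/finally so the shared containerId is nulled out even when removal fails, preventing later tests from endlessly retrying a stale id. A minimal sketch of the pattern with stand-in types (the real code goes through Shell and a logger):

```java
public class ContainerCleanupSketch {

    private static String containerId = "abc123"; // placeholder id

    // Parameterised overload: performs the removal and may throw.
    static void removeContainer(String id) {
        if (id != null) {
            System.out.println("docker rm -f " + id); // stands in for the real shell call
        }
    }

    // No-arg overload: always clears the shared id, success or failure.
    static void removeContainer() {
        try {
            removeContainer(containerId);
        } finally {
            containerId = null;
        }
    }

    public static void main(String[] args) {
        removeContainer();
        removeContainer(); // second call is a harmless no-op
    }
}
```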
@@ -529,7 +570,7 @@ private static void withLogging(CheckedRunnable<Exception> r) thro r.run(); } catch (Exception e) { final Shell.Result logs = getContainerLogs(); - logger.warn("Elasticsearch container failed to start.\n\nStdout:\n" + logs.stdout + "\n\nStderr:\n" + logs.stderr); + logger.warn("Elasticsearch container failed to start.\n\nStdout:\n" + logs.stdout() + "\n\nStderr:\n" + logs.stderr()); throw e; } } @@ -630,7 +671,7 @@ public static List<String> getImageHealthcheck(Distribution distribution) throws } private static JsonNode getImageInspectionJson(Distribution distribution) throws Exception { - String labelsJson = sh.run("docker inspect " + getImageName(distribution)).stdout; + String labelsJson = sh.run("docker inspect " + getImageName(distribution)).stdout(); ObjectMapper mapper = new ObjectMapper(); return mapper.readTree(labelsJson).get(0); } @@ -652,7 +693,7 @@ static PosixFileAttributes getAttributes(Path path) throws FileNotFoundException throw new FileNotFoundException(path + " does not exist"); } - final String[] components = result.stdout.split("\\s+"); + final String[] components = result.stdout().split("\\s+"); final String permissions = components[2]; final String fileType = permissions.substring(0, 1); @@ -677,7 +718,7 @@ static PosixFileAttributes getAttributes(Path path) throws FileNotFoundException * @return the listing */ public static List<String> listContents(String path) { - return dockerShell.run("ls -1 --color=never " + path).stdout.lines().collect(Collectors.toList()); + return dockerShell.run("ls -1 --color=never " + path).stdout().lines().collect(Collectors.toList()); } public static List<String> listContents(Path path) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index fdf4201c96f12..caae6e2635c0f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -70,18 +70,18 @@ public DockerRun volume(Path from, Path to) { /** * Sets the UID that the container is run with, and the GID too if specified.
* - * @param uid the UID to use, or {@code null} to use the image default - * @param gid the GID to use, or {@code null} to use the image default + * @param uidToUse the UID to use, or {@code null} to use the image default + * @param gidToUse the GID to use, or {@code null} to use the image default * @return the current builder */ - public DockerRun uid(Integer uid, Integer gid) { - if (uid == null) { - if (gid != null) { + public DockerRun uid(Integer uidToUse, Integer gidToUse) { + if (uidToUse == null) { + if (gidToUse != null) { throw new IllegalArgumentException("Cannot override GID without also overriding UID"); } } - this.uid = uid; - this.gid = gid; + this.uid = uidToUse; + this.gid = gidToUse; return this; } @@ -108,12 +108,12 @@ String build() { this.envVars.forEach((key, value) -> cmd.add("--env " + key + "=\"" + value + "\"")); - // The container won't run without configuring discovery - cmd.add("--env discovery.type=single-node"); - // Map ports in the container to the host, so that we can send requests - cmd.add("--publish 9200:9200"); - cmd.add("--publish 9300:9300"); + // allow ports to be overridden by tests + if (this.extraArgs.stream().anyMatch(arg -> arg.startsWith("-p") || arg.startsWith("--publish")) == false) { + cmd.add("--publish 9200:9200"); + cmd.add("--publish 9300:9300"); + } // Bind-mount any volumes volumes.forEach((localPath, containerPath) -> { @@ -153,32 +153,14 @@ String build() { * @return an image name */ public static String getImageName(Distribution distribution) { - String suffix; - - switch (distribution.packaging) { - case DOCKER: - suffix = ""; - break; - - case DOCKER_UBI: - suffix = "-ubi8"; - break; - - case DOCKER_IRON_BANK: - suffix = "-ironbank"; - break; - - case DOCKER_CLOUD: - suffix = "-cloud"; - break; - - case DOCKER_CLOUD_ESS: - suffix = "-cloud-ess"; - break; - - default: - throw new IllegalStateException("Unexpected distribution packaging type: " + distribution.packaging); - } + String suffix = switch (distribution.packaging) { + case DOCKER -> ""; + case DOCKER_UBI -> "-ubi8"; + case DOCKER_IRON_BANK -> "-ironbank"; + case DOCKER_CLOUD -> "-cloud"; + case DOCKER_CLOUD_ESS -> "-cloud-ess"; + default -> throw new IllegalStateException("Unexpected distribution packaging type: " + distribution.packaging); + }; return "elasticsearch" + suffix + ":test"; } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerShell.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerShell.java index 926e790131e7b..0847302f1c40f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerShell.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerShell.java @@ -8,8 +8,10 @@ package org.elasticsearch.packaging.util.docker; +import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.packaging.util.Shell; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -51,8 +53,8 @@ public Result run(String script) { logger.error( "Command [{}] failed.\n\nContainer stdout: [{}]\n\nContainer stderr: [{}]", script, - dockerLogs.stdout, - dockerLogs.stderr + dockerLogs.stdout(), + dockerLogs.stderr() ); } catch (ShellException shellException) { logger.error( @@ -64,4 +66,34 @@ public Result run(String script) { throw e; } } + + /** + * Execute a command inside the Docker container, but without invoking a local shell. 
The caller + * is entirely responsible for correctly escaping command arguments, or for invoking a shell + * inside the container if required. + * @param args the command and arguments to execute inside the container + * @return the result of executing the command + */ + public static Shell.Result executeCommand(String... args) { + assert Docker.containerId != null; + + final String[] prefix = new String[] { "docker", "exec", "--tty", Docker.containerId }; + final String[] command = ArrayUtils.concat(prefix, args); + final ProcessBuilder pb = new ProcessBuilder(command); + + final Process p; + final int exitCode; + final String stdout; + final String stderr; + try { + p = pb.start(); + exitCode = p.waitFor(); + stdout = new String(p.getInputStream().readAllBytes(), StandardCharsets.UTF_8).trim(); + stderr = new String(p.getErrorStream().readAllBytes(), StandardCharsets.UTF_8).trim(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + return new Shell.Result(exitCode, stdout, stderr); + } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/MockServer.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/MockServer.java index 94d8ee83b7903..d2c43e478df8f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/MockServer.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/MockServer.java @@ -85,7 +85,7 @@ private MockServer() { private void start() throws Exception { final String command = "docker run -t --detach --rm -p " + CONTAINER_PORT + ":" + CONTAINER_PORT + " mockserver/mockserver:latest"; - this.containerId = this.shell.run(command).stdout.trim(); + this.containerId = this.shell.run(command).stdout().trim(); // It's a Java app, so give it a chance to wake up. I'd add a healthcheck to the above command, // but the image doesn't have any CLI utils at all. 
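The new DockerShell.executeCommand above deliberately bypasses the local shell: the argv array goes straight to docker exec, so arguments are never re-tokenised, glob-expanded, or unquoted on the host. A self-contained sketch of the same ProcessBuilder technique (echo stands in for the docker exec prefix; assumes a Unix-like host):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ExecSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Each argv element is passed through verbatim: no quoting is needed for
        // the embedded space, and the glob pattern stays a literal string.
        Process p = new ProcessBuilder("echo", "two words", "*.yml").start();
        int exitCode = p.waitFor();
        // Draining stdout/stderr only after waitFor() mirrors executeCommand;
        // fine for short outputs, though very large outputs could fill the pipe
        // buffers and stall the child before it exits.
        String stdout = new String(p.getInputStream().readAllBytes(), StandardCharsets.UTF_8).trim();
        String stderr = new String(p.getErrorStream().readAllBytes(), StandardCharsets.UTF_8).trim();
        System.out.println(exitCode + " | " + stdout + " | " + stderr);
    }
}
```

A hypothetical call such as DockerShell.executeCommand("ls", "-1", "/usr/share/elasticsearch/config") therefore lists that directory with no escaping at all, whereas the run(...) path would first hand the whole string to a shell for interpretation.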
diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/http.crt b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http.crt new file mode 100644 index 0000000000000..e1025dd2d2aaa --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http.crt @@ -0,0 +1,40 @@ +-----BEGIN CERTIFICATE----- +MIIDPzCCAiegAwIBAgIVAPIVDR5rVSUV+dljxvdiQHFVkwipMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIyMDExOTE3NDQyMloXDTI1MDExODE3NDQyMlowEzERMA8G +A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCe +79lY0Q4PQm24IBR6OAtjLSdFLFtwC8/i1Z5ujgPkkNW5QrlveGRU52V5Vp9CC8lc +ngiKwRcW5vmG9pvlKaWFImxE6Ap/2OH7sVCgHBhysmDI+naAAnFch2qB7dUr6vn7 +hN6KhdhuCBVxDGK9kBk+6Lo4eSk2lIN5tSf92pHZlcR9rkf5giDoQ3qDZHNvPSlX +kdHdag0VtoxSvHUi1AGcoW4Hq1YqayeO8s+Acm2MnnNgweK4O9YElEVqsqldQlZ/ +jRgygLAHwgmG+kVahwI//ok0c208MBq3ZZBthAuxjT5a9fqW+9OASgexGR+qmp3+ +zT94bgRGg28EEQ7lzLMpAgMBAAGjaTBnMB0GA1UdDgQWBBS4WdAtqOnzKXm60Q3O +2sEYTRwEbDAfBgNVHSMEGDAWgBSouO0kAGN6VSErE0jElIB7IQyvpDAaBgNVHREE +EzARgglsb2NhbGhvc3SHBH8AAAEwCQYDVR0TBAIwADANBgkqhkiG9w0BAQsFAAOC +AQEAk3Fad2CkccpvjQBfs6+8rN+sVNUTiFyOJ4EFF+OlsCVSAVYgcX5wi3ddUPHL +2TKOvKmiAF/aWQ8X4wQWBPq0xBN56qwNbxGv2Fc/9dMQo+YtEt2+3yCi83tpAyjP +hAId4aHFRCjzcfb0Zwq7qmfrtorxfY59dAXWHCNhTcxETFCKxaBg7ZSWLFXSef/q +fL64iyxzb2gctCPHgAp/jANpO1vGLPBO0M1mCBp/I95jgyscZdX9TgqwXuZgsI+d +nGUsd8cUCxnA6RLiek+z7Y8gN/RITmGYuCGMfWqKFrUQ4wQZRkZJEE7nyJis/uCK +VdJyfewvRvjnLvqfgF16cBJwQw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVALA5mjBbdcSBX/AX5ugQy+gbiBJwMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIyMDExOTA5MDIzM1oXDTI1MDExODA5MDIzM1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCfQ7Br31QqYFaXjWYKG8Vh +FnPMnZGAT3L9xW7TdBQ1vlp3pnv77vMg0NZXLLx7FUp5HzZj/I2mUdADTxL/fWg5 +WCtPH6UzFFimk8H2v30OFGSGkdIB6tAXuesuZBihIhIb14OY4btBWoyUwOdMgRX8 +SAzFq+zpq3P49Aiv9tU7icXJyrD2wZCIS0L/nogjIFXXnmUQLFYfVlm7xFQnFTqw +sdTpKthkgQyV6hYaCInktP+X+osOrlnOqHWpRpqgqqj1OB/TqocACpgH1Wmgt0F+ +IR0acVWR1jV0EbSL15i0QTRFgw4/7AbXXf8SKtkhw+SP+epyjDsh9mA1gSiT5q1t +AgMBAAGjUzBRMB0GA1UdDgQWBBSouO0kAGN6VSErE0jElIB7IQyvpDAfBgNVHSME +GDAWgBSouO0kAGN6VSErE0jElIB7IQyvpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQB1tuaFxErPbAlVojdRTFbosqoNRS4kcXhKO3Evk4h9yqkH +kplWPv+5/0PRFycYu9eZau0Gghzsd7ePcra8WLLwFPofuJad6wefWvbb0qGZmsi+ +yQW8/CGWTVVjJZPc1WMElP4eLvMhPrdS2Wioq2s4b9vYHBUHxLrDsx9dr4A4s4Yw +/dt0b15KrscNRXdM0rnvhAghh6grZ+P9lg4wyDEYr3e3ZUROPBWBT/yjveNOLj7n +7M28rgVkAvKzqtb3shLQL4UnsQJfB67sKpruIt+VjecUaTjvLyYaH4NvnlvqOIr3 +Eg+gjpSRGnatAzgwBHx5WYU4FTKfGdrmO81kngyA +-----END CERTIFICATE----- diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/http.key b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http.key new file mode 100644 index 0000000000000..98b90407b6dae --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCe79lY0Q4PQm24 +IBR6OAtjLSdFLFtwC8/i1Z5ujgPkkNW5QrlveGRU52V5Vp9CC8lcngiKwRcW5vmG +9pvlKaWFImxE6Ap/2OH7sVCgHBhysmDI+naAAnFch2qB7dUr6vn7hN6KhdhuCBVx +DGK9kBk+6Lo4eSk2lIN5tSf92pHZlcR9rkf5giDoQ3qDZHNvPSlXkdHdag0VtoxS +vHUi1AGcoW4Hq1YqayeO8s+Acm2MnnNgweK4O9YElEVqsqldQlZ/jRgygLAHwgmG ++kVahwI//ok0c208MBq3ZZBthAuxjT5a9fqW+9OASgexGR+qmp3+zT94bgRGg28E +EQ7lzLMpAgMBAAECggEAC2x05E/eYVVmenn/zssRcnbv4CZwigynTAgLo6mceQS5 
+/99eYbc3Cu0423BQ8RfUyM3pEkQPq0s9uygli2KjbX7MZmWBP7Awif00LNXoIfJV +R4zNEKVcTYjELIOSM15nyl7B+hXluP7mv6HixPpC/kUPAXkf77bb/lb5gWMA4bEn +ghRhHcB9wLh9UZT8rP9q8fisygnGoRRxP2vdVYP7aczeW+tdIjyPV86QTkLgQgE/ +KA5BHJihexzqufoud9IOpp7klp4c3VYcnWVYBob71aREQC6CG3iRU1hNMZHOCCeP +lM8avVkXakGHzjOWceKLUTafmEcnAJ9+RUss+MZX+wKBgQDZs2E3lTzThy7Wa5SK +YmrZUB+7W24Or1SzJtjqulDDukQewlS8Bb2HuPAkA+Gv3/euP96XlQNwHr/WEBwE +JPDqyZQobiYRS2TEdaOn8ALrTyfVo+VIqaDArZo/BBFjfASNFhs6vaEccO+TcEJr +iCbE7iuPYRQmRmuhlwnd0Pwg8wKBgQC65epONDH7VNbmqe8Jd/zzX8Chwm+bcUZb +uDSxi1vfFpp+zvx+EVjyquQz0yE4czTuogU5UesIBx9qOU5eBpbnjqZ0Q3yyRrR/ +tRZMxcpr9Zao42t7LNBAsLaONKZxpbNQifkuT60eFRcBrYmvu+ak7rj1am8FHXC/ +HfvYlJgCcwKBgAqG2v+WDs/nrIMfxpn+ck45yp8Dp8m7/qt/CGQpSDh+rEaUfQZu +fKCwd6Q2L9aSTOa3HetUgEsOoZYXR1OH+cJQpwJheSPC8odxbM11FJ8OP83b0/10 +flpyyo+bgb++wnWUCRMJEl5Os7b8aanLdpx1K9QODKxhIVk59XctYNOzAoGBAK7k +XUiHIUpuTpcU6AF1Tbg0nx7/ws6zxkIn8u89WGA/V51YdgBGnXSfo+I3LLwVWq// +32GPHZ+qgqQ1MfdctTPEUlEzoel1roxe2ZpH8C7sZuptvfNKsSoH8xzJegOZKl4w +boP+vSIMXDYOvWw8Kg2m8l99FJy1uv6swTIfgsuvAoGBAJ9Cmsol0u/WqzWBKmHY +oR7ukYgUG6dRiKkz971UBDp0J7SrVQ437OO4IzsSg4Rt0rM/y+EIx5NM0PLq1KKZ +fu+TmepPWwPeQKYPp6YwBB5qNvG90AdwyAUabW23khDZM0vBFBv+n1oZRHqJ8oqW +uze4XCHJfUn5+lmukNTkul78 +-----END PRIVATE KEY----- diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/http_ca.crt b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http_ca.crt new file mode 100644 index 0000000000000..350fe97f9c882 --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http_ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVALA5mjBbdcSBX/AX5ugQy+gbiBJwMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIyMDExOTA5MDIzM1oXDTI1MDExODA5MDIzM1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCfQ7Br31QqYFaXjWYKG8Vh +FnPMnZGAT3L9xW7TdBQ1vlp3pnv77vMg0NZXLLx7FUp5HzZj/I2mUdADTxL/fWg5 +WCtPH6UzFFimk8H2v30OFGSGkdIB6tAXuesuZBihIhIb14OY4btBWoyUwOdMgRX8 +SAzFq+zpq3P49Aiv9tU7icXJyrD2wZCIS0L/nogjIFXXnmUQLFYfVlm7xFQnFTqw +sdTpKthkgQyV6hYaCInktP+X+osOrlnOqHWpRpqgqqj1OB/TqocACpgH1Wmgt0F+ +IR0acVWR1jV0EbSL15i0QTRFgw4/7AbXXf8SKtkhw+SP+epyjDsh9mA1gSiT5q1t +AgMBAAGjUzBRMB0GA1UdDgQWBBSouO0kAGN6VSErE0jElIB7IQyvpDAfBgNVHSME +GDAWgBSouO0kAGN6VSErE0jElIB7IQyvpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQB1tuaFxErPbAlVojdRTFbosqoNRS4kcXhKO3Evk4h9yqkH +kplWPv+5/0PRFycYu9eZau0Gghzsd7ePcra8WLLwFPofuJad6wefWvbb0qGZmsi+ +yQW8/CGWTVVjJZPc1WMElP4eLvMhPrdS2Wioq2s4b9vYHBUHxLrDsx9dr4A4s4Yw +/dt0b15KrscNRXdM0rnvhAghh6grZ+P9lg4wyDEYr3e3ZUROPBWBT/yjveNOLj7n +7M28rgVkAvKzqtb3shLQL4UnsQJfB67sKpruIt+VjecUaTjvLyYaH4NvnlvqOIr3 +Eg+gjpSRGnatAzgwBHx5WYU4FTKfGdrmO81kngyA +-----END CERTIFICATE----- diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/http_ca.key b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http_ca.key new file mode 100644 index 0000000000000..9d09abebe9633 --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/http_ca.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCfQ7Br31QqYFaX +jWYKG8VhFnPMnZGAT3L9xW7TdBQ1vlp3pnv77vMg0NZXLLx7FUp5HzZj/I2mUdAD +TxL/fWg5WCtPH6UzFFimk8H2v30OFGSGkdIB6tAXuesuZBihIhIb14OY4btBWoyU +wOdMgRX8SAzFq+zpq3P49Aiv9tU7icXJyrD2wZCIS0L/nogjIFXXnmUQLFYfVlm7 +xFQnFTqwsdTpKthkgQyV6hYaCInktP+X+osOrlnOqHWpRpqgqqj1OB/TqocACpgH +1Wmgt0F+IR0acVWR1jV0EbSL15i0QTRFgw4/7AbXXf8SKtkhw+SP+epyjDsh9mA1 
+gSiT5q1tAgMBAAECggEAGBtMTb+uQy3K3ZdbAxuvVXdAYY/WHBL/3QFF5FQf0lS0 +59h3etZGecidMPEbZGaqxkj0WCURocOkwI4hr4dhamX57uOOLfxIljTWTqLZwHL+ +Fk+7IcF+QaEew9UF1K9OCLsnIacaoL/zzc5Mabbo8i32X53wNxxVv7CHIhoIA5dP +9PSRDFXo5ZGDZ8s4ZVWDYyG9nPE42DF/DHk/aS7wGHuao1cQC+dQq73ZaJbT/1eT +PM6a87TLUpcfTzJsvVK2dgTtYSEPXKRhX140+gO9Nj65RsCvUh3OF6k7IEFrF8fW +rWb2oB5BhXPfr/pv5sselNvRcEd9kELJsotzcVZzBwKBgQC/OLPg3yxhi/TDEqqm +YR38U/h3raJmicWGFITkomIOfZSZEkqPgmKXiAUrWSQuhMUCv/Ukm/phq6A+u3vB +/Zp/hRbcw/lbemexd7eK9sQWHNT5avtH5Jykbt2eKW9oGK7URlZTTNFfMdfwf6Ug +M7G9otavpo7J8RjvojVqdM6o0wKBgQDVN5KxxYSXohfxymGPfzteWoWGVDl77tB5 +JAeCwlkMPWkIt6UEj3enC4hImyozdt1wZsjXiefYPPt6JuE32Faf4Z5b8SWaMIt2 +mzUQybvtozOF/AOTlLAYrwajbqoDpaAVddTGgBpkqM/8WRB9NhAe73MuKokNh/Z4 +HYSc3btovwKBgF7rsr7piYpSgwlidrtocg4TUL93vanQsfBAt0cXJD21MNJbNg/O +1UoLByXhdghxd6pJx1KI6t0Y6M67Gk/Np6etVQ/5aAp01IxPtRiSLfwcC8xTwrCJ +Vwh40CH2x4qQ1hghYjCvbS8n+t0zP7CgeJZ+ArvmnfK4b3xktMdHfF5pAoGAEgRq +Lv+D3tRhtZrmd5vGxohbtCg121Uo5LO4tsH5iGFAGO570VDWSZVd5NTH0iV/PNpS +qnQK1WkBzyat7WwolcYY/af5B3iGsHUZHNwPN1uNJQtQug3ce5l+tBzL3RcH2ghk +/IkisdLaEHbuP8ZrwlF1qDcL8crFdwz3gdHy3j0CgYEAhpZ/FWrf1eaXk91D2Xba +kXeAFNENAsNgwubHhFlGUm0TxzkZYimrdAzqL9mpPl08r9awR1nfglfeW+n4vLB/ +9zHcegXxdK9hbxR3ES6jAX07tQXtBPNNMsHwqugfYF9acKrPDTXO1OOHOKz4/vy2 +cvt1BUtzvB57eQd2QtXhXCo= +-----END PRIVATE KEY----- diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport.crt b/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport.crt new file mode 100644 index 0000000000000..d3fcfef93c6f6 --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDIjCCAgqgAwIBAgIUS83knQ28f817BNKYxsKC9S2achYwDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMjIwMTE5MDkwNTUxWhcNMjUwMTE4MDkwNTUxWjATMREwDwYD +VQQDEwhpbnN0YW5jZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK8Z +dERaKHI8+9BOmWo6cD20bSkNTWwJI9BhqT4Cxj/zBmK06mOwqHqfw5DZMRFUOuCU +BeDlX35vr+2eEcWnXWA0zkYPFBwN2ZXtae1Cjyo+JN34onsF2BwpBbAz1BwABs+Z +W9tsrlkRvJ1Msfxr2DJOjJK5vZVF89DCzS7qqALlSlJMfvPGj+wBjf3MoS1AFIIu +UGBOKtbVEOVVchvZ5VA3wSsgaT94/T5ISFjVnSR572PJJJ7ve2K5Z8crClscDQHE +mxxHRnuqgVbBOe3K2ltMvOtpCTOSSP2u1H1EAW9IH1KSYB51E6Ob6E/R3qY7ukrr +ipZ06TVK6wC3rwaVRQMCAwEAAaNNMEswHQYDVR0OBBYEFNBu7h1/U3tr51nO9frB +NE9fsYyKMB8GA1UdIwQYMBaAFKi47SQAY3pVISsTSMSUgHshDK+kMAkGA1UdEwQC +MAAwDQYJKoZIhvcNAQELBQADggEBACV6DaVgMpfNRRMY1xM3G1fJeSXt7sZQxTUM +IwpzKvFpoUo8Qcz5ZVW0ZJ0syoPZcnDjYBCM4HfcI7T5tCNH2TFWbRjacjNfu2gz +p8NycN8proqKKnNDRr5XqRqJvzaU4OfNXIbkKY1B5MZJsJWB5CNMGBfrLfKu/rhl +kdxndwa+eTJCHcJBGauZmQ8wgqhn8UIUv9+VLVjyDMA3AgtUvwYgKKYIKF4Ev0XZ +b7RxIQ1c+h+/hkvzDP5KOTkr1Ri6tCIMaCz3Bosk8CfwNQDFGHa+vabm98wQTBmI +Ke4hGkuAX/crqzFruWkQ0Lw6r4ZjD2/I6ZKKj+BQLmiQAfQ5l/A= +-----END CERTIFICATE----- diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport.key b/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport.key new file mode 100644 index 0000000000000..df4f08fde07c4 --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCvGXREWihyPPvQ +TplqOnA9tG0pDU1sCSPQYak+AsY/8wZitOpjsKh6n8OQ2TERVDrglAXg5V9+b6/t +nhHFp11gNM5GDxQcDdmV7WntQo8qPiTd+KJ7BdgcKQWwM9QcAAbPmVvbbK5ZEbyd +TLH8a9gyToySub2VRfPQws0u6qgC5UpSTH7zxo/sAY39zKEtQBSCLlBgTirW1RDl +VXIb2eVQN8ErIGk/eP0+SEhY1Z0kee9jySSe73tiuWfHKwpbHA0BxJscR0Z7qoFW +wTntytpbTLzraQkzkkj9rtR9RAFvSB9SkmAedROjm+hP0d6mO7pK64qWdOk1SusA 
+t68GlUUDAgMBAAECggEAQUuVrtOMF6WkJbbZwOJP7sK0nkVmpK333AI/MW+pbDl9 +HRvn+ArtmOSw8ff8M0Ecv5iTZJ1EcgrGKS7z46gYywKGFVwUHK4RgLZK9P4IEzGf +X+MS/BtezomBpIZ/R32/DHVc6sNpdK+HjYUHLmEs+v2NsD1xdBPk6ulpyGAbDRDD +y+xofqz7RlQhAaXtbdgz3oH1Waz5uBIcy7a576e6X3GZSjq8sCgRgIAYnMSXxMJC +VW5pl4S+sk3XeDK0G10ur7Bb/4BclWQCqbgkdQ9sgGn5/JYFcAOD/g+VriKSrM9Q +DdUMZgFuJVyCeexMW4W3MIXj7A4DT0TfbhwA9cktaQKBgQDFOTq5MuHzXDRvT0ui +almzptvhlZFw1esehXHh870xK6K3oHNNisT4v6UCX/aSWXrWapONx7cWUl+DbKZZ +8Qj4SdA0tIQ1qrZn8aYZAYbt5dVgNhHHntg/0wQf/dR5/szgFp4Q47SDpATaNPOd +04QaGsEpAQ5lI2+5g7ZAXmJbhwKBgQDjSE9PH1+BaDjk7KyPZJNrOuX0XP8/4oRZ +vok9hoGkHT/7idEkryINGWaJqdW0COoNXCDDuiClQ9dFN09QxBAQR0uA4JPajCsE +9f/BY2OSy0XfrsB/tUtNyxC8IGc8G/VTJVEQUNMvgRt4116HIjOHG70EUyAb9IYu +Tdei0zVBpQKBgQCZ+F/cDdlQgH9/FszZc2WsV2v55SayjI8OOOf7mqntJT/XU7Aw +rVGxUQylmf2Jq8m2c2XWnkBVcOGYXM5SEVcLX7ToMLW2oBvfckxV4VdRisjWX9/p +lB0HVto1j5i91SplF8M3NE3NJ9OR9xzp7iOHrbN3K5ftZYjnr+gswILRKQKBgQDf +yyykmRy+XHRhHZZQX1U1KbqR4hxHuHBfudiC53WxtkdxE+QGvhfGVDN96+gMLRbh +bsyoTRamBGXstqh3u4ahsMHstbWAZbJaYSujLY03VsaHmRfc6BOtFv10cGeWbWUj +qMPoT92SkgsN8usWHpwkNjDpGDyuqhiRcX6ZymRPLQKBgC6+P5ijHZ4ewaEGpgzW +fFh18MYGfFQDiFGijM1f/RmouB1im2pXsOFrzOfKENVfizhYw9yQKf+/Bg0tXY0Q +NWCq9WVRrx7GZKfUYLfAI9I/vCviRNdHNabnWbYE3FbG04C/lJeHt0VljDa61Ve6 +Oxqu0QUOd+JzRfuHISztZNWy +-----END PRIVATE KEY----- diff --git a/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport_ca.crt b/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport_ca.crt new file mode 100644 index 0000000000000..350fe97f9c882 --- /dev/null +++ b/qa/os/src/test/resources/org/elasticsearch/packaging/test/transport_ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVALA5mjBbdcSBX/AX5ugQy+gbiBJwMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIyMDExOTA5MDIzM1oXDTI1MDExODA5MDIzM1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCfQ7Br31QqYFaXjWYKG8Vh +FnPMnZGAT3L9xW7TdBQ1vlp3pnv77vMg0NZXLLx7FUp5HzZj/I2mUdADTxL/fWg5 +WCtPH6UzFFimk8H2v30OFGSGkdIB6tAXuesuZBihIhIb14OY4btBWoyUwOdMgRX8 +SAzFq+zpq3P49Aiv9tU7icXJyrD2wZCIS0L/nogjIFXXnmUQLFYfVlm7xFQnFTqw +sdTpKthkgQyV6hYaCInktP+X+osOrlnOqHWpRpqgqqj1OB/TqocACpgH1Wmgt0F+ +IR0acVWR1jV0EbSL15i0QTRFgw4/7AbXXf8SKtkhw+SP+epyjDsh9mA1gSiT5q1t +AgMBAAGjUzBRMB0GA1UdDgQWBBSouO0kAGN6VSErE0jElIB7IQyvpDAfBgNVHSME +GDAWgBSouO0kAGN6VSErE0jElIB7IQyvpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQB1tuaFxErPbAlVojdRTFbosqoNRS4kcXhKO3Evk4h9yqkH +kplWPv+5/0PRFycYu9eZau0Gghzsd7ePcra8WLLwFPofuJad6wefWvbb0qGZmsi+ +yQW8/CGWTVVjJZPc1WMElP4eLvMhPrdS2Wioq2s4b9vYHBUHxLrDsx9dr4A4s4Yw +/dt0b15KrscNRXdM0rnvhAghh6grZ+P9lg4wyDEYr3e3ZUROPBWBT/yjveNOLj7n +7M28rgVkAvKzqtb3shLQL4UnsQJfB67sKpruIt+VjecUaTjvLyYaH4NvnlvqOIr3 +Eg+gjpSRGnatAzgwBHx5WYU4FTKfGdrmO81kngyA +-----END CERTIFICATE----- diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index c626a7573b6b8..0b9bad2b705ce 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -19,7 +19,7 @@ dependencies { testImplementation project(':client:rest-high-level') } -BuildParams.bwcVersions.withIndexCompatiple { bwcVersion, baseName -> +BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> String oldClusterName = "${baseName}-old" String newClusterName = "${baseName}-new" diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java 
b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 1445feaeb2bbd..1753e77376076 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -75,18 +75,13 @@ public String toString() { } public static TestStep parse(String value) { - switch (value) { - case "step1": - return STEP1_OLD_CLUSTER; - case "step2": - return STEP2_NEW_CLUSTER; - case "step3": - return STEP3_OLD_CLUSTER; - case "step4": - return STEP4_NEW_CLUSTER; - default: - throw new AssertionError("unknown test step: " + value); - } + return switch (value) { + case "step1" -> STEP1_OLD_CLUSTER; + case "step2" -> STEP2_NEW_CLUSTER; + case "step3" -> STEP3_OLD_CLUSTER; + case "step4" -> STEP4_NEW_CLUSTER; + default -> throw new AssertionError("unknown test step: " + value); + }; } } @@ -102,7 +97,7 @@ protected boolean preserveReposUponCompletion() { return true; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80088") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/82569") public void testCreateAndRestoreSnapshot() throws IOException { final String repoName = getTestName(); try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0])))) { @@ -126,20 +121,18 @@ public void testCreateAndRestoreSnapshot() throws IOException { final List<Map<String, Object>> snapshots = listSnapshots(repoName); assertThat(snapshots, hasSize(TEST_STEP.ordinal() + 1)); switch (TEST_STEP) { - case STEP2_NEW_CLUSTER: - case STEP4_NEW_CLUSTER: - assertSnapshotStatusSuccessful( - client, - repoName, - snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new) - ); - break; - case STEP1_OLD_CLUSTER: - assertSnapshotStatusSuccessful(client, repoName, "snapshot-" + TEST_STEP); - break; - case STEP3_OLD_CLUSTER: - assertSnapshotStatusSuccessful(client, repoName, "snapshot-" + TEST_STEP, "snapshot-" + TestStep.STEP3_OLD_CLUSTER); - break; + case STEP2_NEW_CLUSTER, STEP4_NEW_CLUSTER -> assertSnapshotStatusSuccessful( + client, + repoName, + snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new) + ); + case STEP1_OLD_CLUSTER -> assertSnapshotStatusSuccessful(client, repoName, "snapshot-" + TEST_STEP); + case STEP3_OLD_CLUSTER -> assertSnapshotStatusSuccessful( + client, + repoName, + "snapshot-" + TEST_STEP, + "snapshot-" + TestStep.STEP3_OLD_CLUSTER + ); } if (TEST_STEP == TestStep.STEP3_OLD_CLUSTER) { ensureSnapshotRestoreWorks(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER, shards, index); @@ -153,7 +146,7 @@ } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80088") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/82569") public void testReadOnlyRepo() throws IOException { final String repoName = getTestName(); try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0])))) { @@ -167,14 +160,8 @@ } final List<Map<String, Object>> snapshots = listSnapshots(repoName); switch (TEST_STEP) { - case STEP1_OLD_CLUSTER: - assertThat(snapshots, hasSize(1)); - break; - case STEP2_NEW_CLUSTER: - case STEP4_NEW_CLUSTER: - case STEP3_OLD_CLUSTER: -
assertThat(snapshots, hasSize(2)); - break; + case STEP1_OLD_CLUSTER -> assertThat(snapshots, hasSize(1)); + case STEP2_NEW_CLUSTER, STEP4_NEW_CLUSTER, STEP3_OLD_CLUSTER -> assertThat(snapshots, hasSize(2)); } if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER || TEST_STEP == TestStep.STEP3_OLD_CLUSTER) { assertSnapshotStatusSuccessful(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER); @@ -200,7 +187,7 @@ public void testReadOnlyRepo() throws IOException { ElasticsearchStatusException.class ); - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80088") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/82569") public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { final String repoName = getTestName(); try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0])))) { @@ -324,18 +311,15 @@ private static void createSnapshot(RestHighLevelClient client, String repoName, private void createIndex(RestHighLevelClient client, String name, int shards) throws IOException { final Request putIndexRequest = new Request("PUT", "/" + name); - putIndexRequest.setJsonEntity( - "{\n" - + " \"settings\" : {\n" - + " \"index\" : {\n" - + " \"number_of_shards\" : " - + shards - + ", \n" - + " \"number_of_replicas\" : 0 \n" - + " }\n" - + " }\n" - + "}" - ); + putIndexRequest.setJsonEntity(""" + { + "settings" : { + "index" : { + "number_of_shards" : %s, + "number_of_replicas" : 0 + } + } + }""".formatted(shards)); final Response response = client.getLowLevelClient().performRequest(putIndexRequest); assertThat(response.getStatusLine().getStatusCode(), is(HttpURLConnection.HTTP_OK)); } diff --git a/qa/repository-old-versions/build.gradle b/qa/repository-old-versions/build.gradle deleted file mode 100644 index c2a745f887e50..0000000000000 --- a/qa/repository-old-versions/build.gradle +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - - -import org.apache.tools.ant.taskdefs.condition.Os -import org.elasticsearch.gradle.Architecture -import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.AntFixture -import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask - -apply plugin: 'elasticsearch.jdk-download' -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' - -configurations { - oldesFixture -} - -dependencies { - oldesFixture project(':test:fixtures:old-elasticsearch') - testImplementation project(':client:rest-high-level') -} - -jdks { - legacy { - vendor = 'adoptium' - version = '8u302+b08' - platform = OS.current().name().toLowerCase() - architecture = Architecture.current().name().toLowerCase() - } -} - -if (Os.isFamily(Os.FAMILY_WINDOWS)) { - logger.warn("Disabling repository-old-versions tests because we can't get the pid file on windows") - tasks.named("testingConventions").configure { enabled = false } -} else { - /* Set up tasks to unzip and run the old versions of ES before running the integration tests. 
- * To avoid testing against too many old versions, always pick first and last version per major - */ - for (String versionString : ['5.0.0', '5.6.16', '6.0.0', '6.8.20']) { - Version version = Version.fromString(versionString) - String packageName = 'org.elasticsearch.distribution.zip' - String artifact = "${packageName}:elasticsearch:${version}@zip" - String versionNoDots = version.toString().replace('.', '_') - String configName = "es${versionNoDots}" - - configurations.create(configName) - - dependencies.add(configName, artifact) - - // TODO Rene: we should be able to replace these unzip tasks with gradle artifact transforms - TaskProvider unzip = tasks.register("unzipEs${versionNoDots}", Sync) { - Configuration oldEsDependency = configurations[configName] - dependsOn oldEsDependency - /* Use a closure here to delay resolution of the dependency until we need - * it */ - from { - oldEsDependency.collect { zipTree(it) } - } - into temporaryDir - } - - String repoLocation = "${buildDir}/cluster/shared/repo/${versionNoDots}" - - String clusterName = versionNoDots - - def testClusterProvider = testClusters.register(clusterName) { - setting 'path.repo', repoLocation - setting 'xpack.security.enabled', 'false' - } - - TaskProvider fixture = tasks.register("oldES${versionNoDots}Fixture", AntFixture) { - dependsOn project.configurations.oldesFixture, jdks.legacy - dependsOn unzip - executable = "${BuildParams.runtimeJavaHome}/bin/java" - env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}" - // old versions of Elasticsearch need JAVA_HOME - env 'JAVA_HOME', jdks.legacy.javaHomePath - // If we are running on certain arm systems we need to explicitly set the stack size to overcome JDK page size bug - if (Architecture.current() == Architecture.AARCH64) { - env 'ES_JAVA_OPTS', '-Xss512k' - } - args 'oldes.OldElasticsearch', - baseDir, - unzip.get().temporaryDir, - false, - "path.repo: ${repoLocation}" - if (version.onOrAfter('6.8.0') && Architecture.current() == Architecture.AARCH64) { - // We need to explicitly disable ML when running old ES versions on ARM - args 'xpack.ml.enabled: false' - } - maxWaitInSeconds 60 - waitCondition = { fixture, ant -> - // the fixture writes the ports file when Elasticsearch's HTTP service - // is ready, so we can just wait for the file to exist - return fixture.portsFile.exists() - } - } - - tasks.register("javaRestTest#${versionNoDots}", StandaloneRestIntegTestTask) { - useCluster testClusterProvider - dependsOn fixture - doFirst { - delete(repoLocation) - mkdir(repoLocation) - } - systemProperty "tests.repo.location", repoLocation - systemProperty "tests.es.version", version.toString() - /* Use a closure on the string to delay evaluation until right before we - * run the integration tests so that we can be sure that the file is - * ready. 
*/ - nonInputProperties.systemProperty "tests.es.port", "${-> fixture.get().addressAndPort}" - nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusterProvider.get().allHttpSocketURI.join(",")}") - nonInputProperties.systemProperty('tests.clustername', "${-> testClusterProvider.get().getName()}") - } - - tasks.named("check").configure { - dependsOn "javaRestTest#${versionNoDots}" - } - } -} - diff --git a/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java deleted file mode 100644 index 140ef92f9507f..0000000000000 --- a/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.oldrepos; - -import org.apache.http.HttpHost; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.elasticsearch.client.Node; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.cluster.SnapshotsInProgress; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.snapshots.SnapshotInfo; -import org.elasticsearch.snapshots.SnapshotState; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.test.rest.ESRestTestCase; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; - -public class OldRepositoryAccessIT extends ESRestTestCase { - @Override - protected Map<String, List<Map<?, ?>>> wipeSnapshots() { - return Collections.emptyMap(); - } - - @SuppressWarnings("removal") - public void testOldRepoAccess() throws IOException { - String repoLocation = System.getProperty("tests.repo.location"); - Version oldVersion = Version.fromString(System.getProperty("tests.es.version")); - - int oldEsPort = Integer.parseInt(System.getProperty("tests.es.port")); - try ( - RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0]))); - RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build() - ) { - try { - Request createIndex = new Request("PUT", "/test"); - int numberOfShards = randomIntBetween(1, 3); - createIndex.setJsonEntity("{\"settings\":{\"number_of_shards\": " + numberOfShards + "}}"); - oldEs.performRequest(createIndex); - - for (int i = 0; i < 5; i++) { - Request doc = new Request("PUT", "/test/doc/testdoc"
+ i); - doc.addParameter("refresh", "true"); - doc.setJsonEntity("{\"test\":\"test" + i + "\", \"val\":" + i + "}"); - oldEs.performRequest(doc); - } - - // register repo on old ES and take snapshot - Request createRepoRequest = new Request("PUT", "/_snapshot/testrepo"); - createRepoRequest.setJsonEntity("{\"type\":\"fs\",\"settings\":{\"location\":\"" + repoLocation + "\"}}"); - oldEs.performRequest(createRepoRequest); - - Request createSnapshotRequest = new Request("PUT", "/_snapshot/testrepo/snap1"); - createSnapshotRequest.addParameter("wait_for_completion", "true"); - createSnapshotRequest.setJsonEntity("{\"indices\":\"test\"}"); - oldEs.performRequest(createSnapshotRequest); - - // register repo on new ES - ElasticsearchAssertions.assertAcked( - client.snapshot() - .createRepository( - new PutRepositoryRequest("testrepo").type("fs") - .settings(Settings.builder().put("location", repoLocation).build()), - RequestOptions.DEFAULT - ) - ); - - // list snapshots on new ES - List snapshotInfos = client.snapshot() - .get(new GetSnapshotsRequest("testrepo").snapshots(new String[] { "_all" }), RequestOptions.DEFAULT) - .getSnapshots(); - assertThat(snapshotInfos, hasSize(1)); - SnapshotInfo snapshotInfo = snapshotInfos.get(0); - assertEquals("snap1", snapshotInfo.snapshotId().getName()); - assertEquals("testrepo", snapshotInfo.repository()); - assertEquals(Arrays.asList("test"), snapshotInfo.indices()); - assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); - assertEquals(numberOfShards, snapshotInfo.successfulShards()); - assertEquals(numberOfShards, snapshotInfo.totalShards()); - assertEquals(0, snapshotInfo.failedShards()); - assertEquals(oldVersion, snapshotInfo.version()); - - // list specific snapshot on new ES - snapshotInfos = client.snapshot() - .get(new GetSnapshotsRequest("testrepo").snapshots(new String[] { "snap1" }), RequestOptions.DEFAULT) - .getSnapshots(); - assertThat(snapshotInfos, hasSize(1)); - snapshotInfo = snapshotInfos.get(0); - assertEquals("snap1", snapshotInfo.snapshotId().getName()); - assertEquals("testrepo", snapshotInfo.repository()); - assertEquals(Arrays.asList("test"), snapshotInfo.indices()); - assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); - assertEquals(numberOfShards, snapshotInfo.successfulShards()); - assertEquals(numberOfShards, snapshotInfo.totalShards()); - assertEquals(0, snapshotInfo.failedShards()); - assertEquals(oldVersion, snapshotInfo.version()); - - // list advanced snapshot info on new ES - SnapshotsStatusResponse snapshotsStatusResponse = client.snapshot() - .status(new SnapshotsStatusRequest("testrepo").snapshots(new String[] { "snap1" }), RequestOptions.DEFAULT); - assertThat(snapshotsStatusResponse.getSnapshots(), hasSize(1)); - SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); - assertEquals("snap1", snapshotStatus.getSnapshot().getSnapshotId().getName()); - assertEquals("testrepo", snapshotStatus.getSnapshot().getRepository()); - assertEquals(Sets.newHashSet("test"), snapshotStatus.getIndices().keySet()); - assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatus.getState()); - assertEquals(numberOfShards, snapshotStatus.getShardsStats().getDoneShards()); - assertEquals(numberOfShards, snapshotStatus.getShardsStats().getTotalShards()); - assertEquals(0, snapshotStatus.getShardsStats().getFailedShards()); - assertThat(snapshotStatus.getStats().getTotalSize(), greaterThan(0L)); - assertThat(snapshotStatus.getStats().getTotalFileCount(), greaterThan(0)); - } finally { - 
oldEs.performRequest(new Request("DELETE", "/test")); - } - } - } - -} diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index beffb18db7a0c..b5c4d5b99f3fc 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -14,7 +14,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' -BuildParams.bwcVersions.withWireCompatiple { bwcVersion, baseName -> +BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> /* * The goal here is to: *
    diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index c905ebf65a01a..768274e817c13 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -18,16 +18,12 @@ protected enum ClusterType { UPGRADED; public static ClusterType parse(String value) { - switch (value) { - case "old_cluster": - return OLD; - case "mixed_cluster": - return MIXED; - case "upgraded_cluster": - return UPGRADED; - default: - throw new AssertionError("unknown cluster type: " + value); - } + return switch (value) { + case "old_cluster" -> OLD; + case "mixed_cluster" -> MIXED; + case "upgraded_cluster" -> UPGRADED; + default -> throw new AssertionError("unknown cluster type: " + value); + }; } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index 8d46490608652..4f7b1da59b812 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -36,21 +36,23 @@ public void testGetFeatureUpgradeStatus() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setJsonEntity("{\"index\": {\"_index\": \"feature_test_index_old\"}}\n" + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); + bulk.setJsonEntity(""" + {"index": {"_index": "feature_test_index_old"}} + {"f1": "v1", "f2": "v2"} + """); client().performRequest(bulk); // start a async reindex job Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( - "{\n" - + " \"source\":{\n" - + " \"index\":\"feature_test_index_old\"\n" - + " },\n" - + " \"dest\":{\n" - + " \"index\":\"feature_test_index_reindex\"\n" - + " }\n" - + "}" - ); + reindex.setJsonEntity(""" + { + "source":{ + "index":"feature_test_index_old" + }, + "dest":{ + "index":"feature_test_index_reindex" + } + }"""); reindex.addParameter("wait_for_completion", "false"); Map response = entityAsMap(client().performRequest(reindex)); String taskId = (String) response.get("task"); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 21367aba17978..2adc334983c93 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -7,17 +7,17 @@ */ package org.elasticsearch.upgrades; -import io.github.nik9000.mapmatcher.ListMatcher; - import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.test.ListMatcher; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -29,10 +29,10 @@ import 
java.util.Map; import java.util.concurrent.TimeUnit; -import static io.github.nik9000.mapmatcher.ListMatcher.matchesList; -import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; -import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; +import static org.elasticsearch.test.ListMatcher.matchesList; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -135,26 +135,25 @@ public void testIndexing() throws IOException { public void testAutoIdWithOpTypeCreate() throws IOException { final String indexName = "auto_id_and_op_type_create_index"; - StringBuilder b = new StringBuilder(); - b.append("{\"create\": {\"_index\": \"").append(indexName).append("\"}}\n"); - b.append("{\"f1\": \"v\"}\n"); + String b = """ + {"create": {"_index": "%s"}} + {"f1": "v"} + """.formatted(indexName); Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setJsonEntity(b.toString()); + bulk.setJsonEntity(b); switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { Request createTestIndex = new Request("PUT", "/" + indexName); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); client().performRequest(createTestIndex); - break; - case MIXED: + } + case MIXED -> { Request waitForGreen = new Request("GET", "/_cluster/health"); waitForGreen.addParameter("wait_for_nodes", "3"); client().performRequest(waitForGreen); - Version minNodeVersion = minNodeVersion(); - if (minNodeVersion.before(Version.V_7_5_0)) { ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); @@ -168,19 +167,16 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } else { client().performRequest(bulk); } - break; - case UPGRADED: - client().performRequest(bulk); - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + case UPGRADED -> client().performRequest(bulk); + default -> throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); } } public void testDateNanosFormatUpgrade() throws IOException { final String indexName = "test_date_nanos"; switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { Request createIndex = new Request("PUT", "/" + indexName); XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent()) .startObject() @@ -197,7 +193,6 @@ public void testDateNanosFormatUpgrade() throws IOException { .endObject(); createIndex.setJsonEntity(Strings.toString(mappings)); client().performRequest(createIndex); - Request index = new Request("POST", "/" + indexName + "/_doc/"); XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) .startObject() @@ -207,9 +202,8 @@ public void testDateNanosFormatUpgrade() throws IOException { index.addParameter("refresh", "true"); index.setJsonEntity(Strings.toString(doc)); client().performRequest(index); - break; - - case UPGRADED: + } + case UPGRADED -> { Request search = new Request("POST", "/" + indexName + "/_search"); XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) .startObject() @@ -217,19 +211,14 @@ public void testDateNanosFormatUpgrade() throws 
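// testAutoIdWithOpTypeCreate above swaps a StringBuilder for a Java 15 text block plus
// String.formatted. A runnable sketch of the pattern (index name and fields mirror the
// hunk; nothing else is implied):
public class BulkBodyDemo {
    public static void main(String[] args) {
        String indexName = "auto_id_and_op_type_create_index";
        // The text block keeps the two-line NDJSON bulk body readable; formatted()
        // substitutes the index name without manual concatenation.
        String body = """
            {"create": {"_index": "%s"}}
            {"f1": "v"}
            """.formatted(indexName);
        System.out.print(body);
    }
}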
IOException { .endObject(); search.setJsonEntity(Strings.toString(query)); Map response = entityAsMap(client().performRequest(search)); - Map bestHit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", response))).get(0); List date = (List) XContentMapValues.extractValue("fields.date", bestHit); assertThat(date.size(), equalTo(1)); assertThat(date.get(0), equalTo("2015-01-01T12:10:30.123Z")); - List dateNanos = (List) XContentMapValues.extractValue("fields.date_nanos", bestHit); assertThat(dateNanos.size(), equalTo(1)); assertThat(dateNanos.get(0), equalTo("2015-01-01T12:10:30.123456789Z")); - break; - - default: - break; + } } } @@ -258,18 +247,19 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio } public void testTsdb() throws IOException { - assumeTrue("tsdb added in 8.0.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_0_0)); + assumeTrue("sort by _tsid added in 8.1.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_1_0)); StringBuilder bulk = new StringBuilder(); switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { createTsdbIndex(); tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[0], TSDB_TIMES[1], 0.1); tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[0], TSDB_TIMES[1], -0.1); bulk("tsdb", bulk.toString()); assertTsdbAgg(closeTo(215.95, 0.005), closeTo(-215.95, 0.005)); return; - case MIXED: + } + case MIXED -> { if (FIRST_MIXED_ROUND) { tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[1], TSDB_TIMES[2], 0.1); tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[1], TSDB_TIMES[2], -0.1); @@ -285,7 +275,8 @@ public void testTsdb() throws IOException { bulk("tsdb", bulk.toString()); assertTsdbAgg(closeTo(218.95, 0.005), closeTo(-218.95, 0.005), closeTo(2408.45, 0.005), closeTo(21895, 0.5)); return; - case UPGRADED: + } + case UPGRADED -> { tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[3], TSDB_TIMES[4], 0.1); tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[3], TSDB_TIMES[4], -0.1); tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[3], TSDB_TIMES[4], 1.1); @@ -300,6 +291,7 @@ public void testTsdb() throws IOException { closeTo(-11022.5, 0.5) ); return; + } } } @@ -322,6 +314,8 @@ private void createTsdbIndex() throws IOException { indexSpec.startObject("settings").startObject("index"); indexSpec.field("mode", "time_series"); indexSpec.array("routing_path", new String[] { "dim" }); + indexSpec.field("time_series.start_time", 1L); + indexSpec.field("time_series.end_time", DateUtils.MAX_MILLIS_BEFORE_9999 - 1); indexSpec.endObject().endObject(); createIndex.setJsonEntity(Strings.toString(indexSpec.endObject())); client().performRequest(createIndex); @@ -331,10 +325,10 @@ private void tsdbBulk(StringBuilder bulk, String dim, long timeStart, long timeE long delta = TimeUnit.SECONDS.toMillis(20); double value = (timeStart - TSDB_TIMES[0]) / TimeUnit.SECONDS.toMillis(20) * rate; for (long t = timeStart; t < timeEnd; t += delta) { - bulk.append("{\"index\": {\"_index\": \"tsdb\"}}\n"); - bulk.append("{\"@timestamp\": ").append(t); - bulk.append(", \"dim\": \"").append(dim).append("\""); - bulk.append(", \"value\": ").append(value).append("}\n"); + bulk.append(""" + {"index": {"_index": "tsdb"}} + {"@timestamp": %s, "dim": "%s", "value": %s} + """.formatted(t, dim, value)); value += rate; } } @@ -343,20 +337,9 @@ private void assertTsdbAgg(Matcher... 
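// createTsdbIndex above now sets explicit time-series boundaries alongside the index mode
// and routing path. A hedged sketch of the settings JSON that builder code assembles; the
// setting names come from the hunk, while the boundary values here are illustrative
// placeholders rather than the real DateUtils constant:
public class TsdbIndexSettingsDemo {
    public static void main(String[] args) {
        String settings = """
            {
              "settings": {
                "index": {
                  "mode": "time_series",
                  "routing_path": ["dim"],
                  "time_series.start_time": 1,
                  "time_series.end_time": 4102444800000
                }
              }
            }""";
        System.out.println(settings);
    }
}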
expected) throws IOException { Request request = new Request("POST", "/tsdb/_search"); request.addParameter("size", "0"); XContentBuilder body = JsonXContent.contentBuilder().startObject(); - // TODO replace tsid runtime field with real tsid - body.startObject("runtime_mappings"); - { - body.startObject("tsid"); - { - body.field("type", "keyword"); - body.field("script", "emit('dim:' + doc['dim'].value)"); - } - body.endObject(); - } - body.endObject(); body.startObject("aggs").startObject("tsids"); { - body.startObject("terms").field("field", "tsid").endObject(); + body.startObject("terms").field("field", "_tsid").endObject(); body.startObject("aggs").startObject("avg"); { body.startObject("avg").field("field", "value").endObject(); @@ -367,8 +350,7 @@ private void assertTsdbAgg(Matcher... expected) throws IOException { request.setJsonEntity(Strings.toString(body.endObject())); ListMatcher tsidsExpected = matchesList(); for (int d = 0; d < expected.length; d++) { - // Object key = Map.of("dim", TSDB_DIMS.get(d)); TODO use this once tsid is real - Object key = "dim:" + TSDB_DIMS.get(d); + Object key = Map.of("dim", TSDB_DIMS.get(d)); tsidsExpected = tsidsExpected.item(matchesMap().extraOk().entry("key", key).entry("avg", Map.of("value", expected[d]))); } assertMap( diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index f3e9df923433f..88b1bcdabdbe6 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -125,7 +125,7 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { List nodes = new ArrayList<>(nodeMap.keySet()); switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) @@ -140,8 +140,8 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { ensureGreen(index); // make sure that we can index while the replicas are recovering updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries")); - break; - case MIXED: + } + case MIXED -> { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), (String) null)); asyncIndexDocs(index, 10, 50).get(); ensureGreen(index); @@ -151,8 +151,8 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { assertCount(index, "_only_nodes:" + nodes.get(2), 60); // make sure that we can index while the replicas are recovering updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries")); - break; - case UPGRADED: + } + case UPGRADED -> { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), (String) null)); asyncIndexDocs(index, 60, 45).get(); ensureGreen(index); @@ -160,9 +160,8 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { assertCount(index, "_only_nodes:" + nodes.get(0), 105); assertCount(index, "_only_nodes:" + nodes.get(1), 105); assertCount(index, "_only_nodes:" + nodes.get(2), 105); - break; - default: - throw new IllegalStateException("unknown type " + CLUSTER_TYPE); + } + default -> throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } } @@ -215,7 +214,7 @@ private String 
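// assertTsdbAgg above drops the runtime "tsid" field and aggregates directly on the real
// _tsid metadata field. A sketch of the search body the XContentBuilder calls assemble
// (reconstructed from the builder code, not captured output):
public class TsidAggBodyDemo {
    public static void main(String[] args) {
        String body = """
            {
              "aggs": {
                "tsids": {
                  "terms": { "field": "_tsid" },
                  "aggs": {
                    "avg": { "avg": { "field": "value" } }
                  }
                }
              }
            }""";
        System.out.println(body);
    }
}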
getNodeId(Predicate versionPredicate) throws IOException public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) @@ -232,8 +231,8 @@ public void testRelocationWithConcurrentIndexing() throws Exception { // make sure that no shards are allocated, so we can make sure the primary stays on the old node (when one // node stops, we lose the master too, so a replica will not be promoted) updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); - break; - case MIXED: + } + case MIXED -> { final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is placed on the old node @@ -262,8 +261,8 @@ public void testRelocationWithConcurrentIndexing() throws Exception { ensureGreen(index); client().performRequest(new Request("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + newNode, 60); - break; - case UPGRADED: + } + case UPGRADED -> { updateIndexSettings( index, Settings.builder() @@ -278,13 +277,11 @@ public void testRelocationWithConcurrentIndexing() throws Exception { ObjectPath objectPath = ObjectPath.createFromResponse(response); final Map nodeMap = objectPath.evaluate("nodes"); List nodes = new ArrayList<>(nodeMap.keySet()); - assertCount(index, "_only_nodes:" + nodes.get(0), 105); assertCount(index, "_only_nodes:" + nodes.get(1), 105); assertCount(index, "_only_nodes:" + nodes.get(2), 105); - break; - default: - throw new IllegalStateException("unknown type " + CLUSTER_TYPE); + } + default -> throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } if (randomBoolean()) { flush(index, randomBoolean()); @@ -355,7 +352,7 @@ public void testRetentionLeasesEstablishedWhenPromotingPrimary() throws Exceptio public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Exception { final String index = "recover_and_create_leases_in_relocation"; switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(1, 5)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, 1)) @@ -371,9 +368,8 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti client().performRequest(new Request("POST", "/" + index + "/_flush")); } ensureGreen(index); - break; - - case MIXED: + } + case MIXED -> { // trigger a primary relocation by excluding the last old node with a shard filter final Map nodesMap = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_nodes"))) .evaluate("nodes"); @@ -385,7 +381,6 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti oldNodeNames.add((String) nodeDetailsMap.get("name")); } } - if (oldNodeNames.size() == 1) { final String oldNodeName = oldNodeNames.get(0); logger.info("--> excluding index [{}] from node [{}]", index, oldNodeName); @@ -395,12 +390,11 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti } ensureGreen(index); ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); - break; - - case UPGRADED: + } + case UPGRADED -> { ensureGreen(index); 
ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); - break; + } } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index c15ca37424add..94a6c3a744ad7 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -39,12 +39,13 @@ import static org.hamcrest.Matchers.notNullValue; public class SnapshotBasedRecoveryIT extends AbstractRollingTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80939") public void testSnapshotBasedRecovery() throws Exception { final String indexName = "snapshot_based_recovery"; final String repositoryName = "snapshot_based_recovery_repo"; final int numDocs = 200; switch (CLUSTER_TYPE) { - case OLD: + case OLD -> { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) @@ -54,7 +55,6 @@ public void testSnapshotBasedRecovery() throws Exception { ensureGreen(indexName); indexDocs(indexName, numDocs); flush(indexName, true); - registerRepository( repositoryName, "fs", @@ -64,14 +64,11 @@ public void testSnapshotBasedRecovery() throws Exception { .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), true) .build() ); - createSnapshot(repositoryName, "snap", true); - updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)); ensureGreen(indexName); - break; - case MIXED: - case UPGRADED: + } + case MIXED, UPGRADED -> { if (FIRST_MIXED_ROUND) { String upgradedNodeId = getUpgradedNodeId(); @@ -99,14 +96,12 @@ public void testSnapshotBasedRecovery() throws Exception { // Drop replicas updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)); - updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)); ensureGreen(indexName); assertMatchAllReturnsAllDocuments(indexName, numDocs); assertMatchQueryReturnsAllDocuments(indexName, numDocs); - break; - default: - throw new IllegalStateException("unknown type " + CLUSTER_TYPE); + } + default -> throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java index f814f899ae3c5..b53766f70acb3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java @@ -34,21 +34,23 @@ public void testSystemIndicesUpgrades() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}}\n" + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); + bulk.setJsonEntity(""" + {"index": {"_index": "test_index_old"}} + {"f1": "v1", "f2": "v2"} + """); client().performRequest(bulk); // start a async reindex job Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( - "{\n" - + " \"source\":{\n" - + " \"index\":\"test_index_old\"\n" - + " },\n" - + " \"dest\":{\n" - + " 
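// The SnapshotBasedRecoveryIT hunk above merges two fall-through labels into a single
// multi-label arrow arm (case MIXED, UPGRADED ->). A standalone sketch, with phase
// descriptions invented for illustration:
public class MultiLabelSwitchDemo {
    enum ClusterType { OLD, MIXED, UPGRADED }

    static String phase(ClusterType type) {
        // An exhaustive switch expression over an enum needs no default branch.
        return switch (type) {
            case OLD -> "seed data, snapshot, add a replica";
            case MIXED, UPGRADED -> "drop and restore replicas, then verify counts";
        };
    }

    public static void main(String[] args) {
        System.out.println(phase(ClusterType.UPGRADED));
    }
}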
\"index\":\"test_index_reindex\"\n" - + " }\n" - + "}" - ); + reindex.setJsonEntity(""" + { + "source":{ + "index":"test_index_old" + }, + "dest":{ + "index":"test_index_reindex" + } + }"""); reindex.addParameter("wait_for_completion", "false"); Map response = entityAsMap(client().performRequest(reindex)); String taskId = (String) response.get("task"); @@ -83,14 +85,13 @@ public void testSystemIndicesUpgrades() throws Exception { if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity( - "{\n" - + " \"actions\": [\n" - + " {\"add\": {\"index\": \".tasks\", \"alias\": \"test-system-alias\"}},\n" - + " {\"add\": {\"index\": \"test_index_reindex\", \"alias\": \"test-system-alias\"}}\n" - + " ]\n" - + "}" - ); + putAliasRequest.setJsonEntity(""" + { + "actions": [ + {"add": {"index": ".tasks", "alias": "test-system-alias"}}, + {"add": {"index": "test_index_reindex", "alias": "test-system-alias"}} + ] + }"""); putAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java index ecab6cfbc9808..40e63b4ae32d7 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -48,14 +48,20 @@ public void skipIfNotXPack() { */ public void testBasicFeature() throws IOException { Request bulk = new Request("POST", "/sql_test/_bulk"); - bulk.setJsonEntity("{\"index\":{}}\n" + "{\"f\": \"1\"}\n" + "{\"index\":{}}\n" + "{\"f\": \"2\"}\n"); + bulk.setJsonEntity(""" + {"index":{}} + {"f": "1"} + {"index":{}} + {"f": "2"} + """); bulk.addParameter("refresh", "true"); client().performRequest(bulk); Request sql = new Request("POST", "/_sql"); sql.setJsonEntity("{\"query\": \"SELECT * FROM sql_test WHERE f > 1 ORDER BY f ASC\"}"); String response = EntityUtils.toString(client().performRequest(sql).getEntity()); - assertEquals("{\"columns\":[{\"name\":\"f\",\"type\":\"text\"}],\"rows\":[[\"2\"]]}", response); + assertEquals(""" + {"columns":[{"name":"f","type":"text"}],"rows":[["2"]]}""", response); } /** @@ -77,23 +83,23 @@ public void testTrialLicense() throws IOException { assertEquals("{\"count\":0,\"jobs\":[]}", noJobs); Request createJob = new Request("PUT", "/_ml/anomaly_detectors/test_job"); - createJob.setJsonEntity( - "{\n" - + " \"analysis_config\" : {\n" - + " \"bucket_span\": \"10m\",\n" - + " \"detectors\": [\n" - + " {\n" - + " \"function\": \"sum\",\n" - + " \"field_name\": \"total\"\n" - + " }\n" - + " ]\n" - + " },\n" - + " \"data_description\": {\n" - + " \"time_field\": \"timestamp\",\n" - + " \"time_format\": \"epoch_ms\"\n" - + " }\n" - + "}\n" - ); + createJob.setJsonEntity(""" + { + "analysis_config" : { + "bucket_span": "10m", + "detectors": [ + { + "function": "sum", + "field_name": "total" + } + ] + }, + "data_description": { + "time_field": "timestamp", + "time_format": "epoch_ms" + } + } + """); client().performRequest(createJob); } } diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index 7fbe58396d782..4d8105db26648 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -12,8 +12,7 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 
'elasticsearch.test-with-dependencies' dependencies { - testImplementation "com.fasterxml.jackson.core:jackson-databind:2.10.4" - testImplementation project(':modules:transport-netty4') // for http + testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" testImplementation project(':plugins:transport-nio') // for http } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java index 0d0e167517f79..7806c14156fba 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java @@ -81,8 +81,8 @@ void runTest(Request request, String actionPrefix) throws Exception { for (final IndexService indexService : indicesService) { for (final IndexShard indexShard : indexService) { final Engine engine = IndexShardTestCase.getEngine(indexShard); - if (engine instanceof SearcherBlockingEngine) { - searcherBlocks.add(((SearcherBlockingEngine) engine).searcherBlock); + if (engine instanceof SearcherBlockingEngine searcherBlockingEngine) { + searcherBlocks.add(searcherBlockingEngine.searcherBlock); } } } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java index f921b9919439e..09038d3b6e4b4 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -51,15 +52,15 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { - throw new AssertionError(source, e); + public void onFailure(Exception e) { + throw new AssertionError("update state", e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { future.onResponse(null); } - }); + }, ClusterStateTaskExecutor.unbatched()); future.actionGet(); } @@ -89,7 +90,7 @@ public void testClusterStateRestCancellation() throws Exception { assertBusy(() -> { updateClusterState(clusterService, s -> ClusterState.builder(s).build()); final List tasks = client().admin().cluster().prepareListTasks().get().getTasks(); - assertTrue(tasks.toString(), tasks.stream().noneMatch(t -> t.getAction().equals(ClusterStateAction.NAME))); + assertTrue(tasks.toString(), tasks.stream().noneMatch(t -> t.action().equals(ClusterStateAction.NAME))); }); updateClusterState(clusterService, s -> ClusterState.builder(s).removeCustom(AssertingCustom.NAME).build()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStatsRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStatsRestCancellationIT.java index 
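// BlockedSearcherRestCancellationTestCase above adopts Java 16 pattern matching for
// instanceof: the type test and the typed binding happen in one step, with no explicit
// cast. A self-contained sketch (the Engine hierarchy below is invented for illustration):
public class InstanceofPatternDemo {
    sealed interface Engine permits PlainEngine, BlockingEngine {}
    record PlainEngine() implements Engine {}
    record BlockingEngine(String searcherBlock) implements Engine {}

    public static void main(String[] args) {
        Engine engine = new BlockingEngine("block-1");
        // Before Java 16 this needed an instanceof check followed by a cast.
        if (engine instanceof BlockingEngine blocking) {
            System.out.println(blocking.searcherBlock());
        }
    }
}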
ba71c781973d3..c864c90128b8f 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStatsRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStatsRestCancellationIT.java @@ -83,8 +83,8 @@ public void testClusterStateRestCancellation() throws Exception { for (final IndexService indexService : indicesService) { for (final IndexShard indexShard : indexService) { final Engine engine = IndexShardTestCase.getEngine(indexShard); - if (engine instanceof StatsBlockingEngine) { - statsBlocks.add(((StatsBlockingEngine) engine).statsBlock); + if (engine instanceof StatsBlockingEngine statsBlockingEngine) { + statsBlocks.add(statsBlockingEngine.statsBlock); } } } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DanglingIndicesRestIT.java index 491007bd5e74a..6880c558cc4d6 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DanglingIndicesRestIT.java @@ -218,19 +218,20 @@ private Map createIndices(String... indices) throws IOException assert indices.length > 0; for (String index : indices) { - String indexSettings = "{" - + " \"settings\": {" - + " \"index\": {" - + " \"number_of_shards\": 1," - + " \"number_of_replicas\": 2," - + " \"routing\": {" - + " \"allocation\": {" - + " \"total_shards_per_node\": 1" - + " }" - + " }" - + " }" - + " }" - + "}"; + String indexSettings = """ + { + "settings": { + "index": { + "number_of_shards": 1, + "number_of_replicas": 2, + "routing": { + "allocation": { + "total_shards_per_node": 1 + } + } + } + } + }"""; Request request = new Request("PUT", "/" + index); request.setJsonEntity(indexSettings); assertOK(getRestClient().performRequest(request)); @@ -291,13 +292,5 @@ public Settings onNodeStopped(String nodeName) throws Exception { return new DanglingIndexDetails(stoppedNodeName.get(), indexToUUID); } - private static class DanglingIndexDetails { - private final String stoppedNodeName; - private final Map indexToUUID; - - DanglingIndexDetails(String stoppedNodeName, Map indexToUUID) { - this.stoppedNodeName = stoppedNodeName; - this.indexToUUID = indexToUUID; - } - } + private record DanglingIndexDetails(String stoppedNodeName, Map indexToUUID) {} } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index cf434bb180884..2e41e311073a9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -25,12 +25,13 @@ public class HttpCompressionIT extends ESRestTestCase { private static final String GZIP_ENCODING = "gzip"; - private static final String SAMPLE_DOCUMENT = "{\n" - + " \"name\": {\n" - + " \"first name\": \"Steve\",\n" - + " \"last name\": \"Jobs\"\n" - + " }\n" - + "}"; + private static final String SAMPLE_DOCUMENT = """ + { + "name": { + "first name": "Steve", + "last name": "Jobs" + } + }"""; public void testCompressesResponseIfRequested() throws IOException { Request request = new Request("POST", "/company/_doc/2"); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index 83f48d79a9129..356cb53c4453e 100644 --- 
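// The DanglingIndicesRestIT hunk above collapses a small immutable holder class into a
// record: one line replaces the fields, constructor, and accessors. A sketch of the
// resulting shape (types mirror the hunk, with generics restored for illustration):
import java.util.Map;

public class DanglingIndexRecordDemo {
    record DanglingIndexDetails(String stoppedNodeName, Map<String, String> indexToUUID) {}

    public static void main(String[] args) {
        var details = new DanglingIndexDetails("node-0", Map.of("my-index", "uuid-1"));
        // Record accessors use the bare component name rather than a get- prefix.
        System.out.println(details.stoppedNodeName() + " -> " + details.indexToUUID());
    }
}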
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.netty4.Netty4Plugin; -import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.elasticsearch.transport.nio.NioTransportPlugin; import org.junit.BeforeClass; @@ -23,20 +22,15 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { private static String nodeTransportTypeKey; private static String nodeHttpTypeKey; - private static String clientTypeKey; - @SuppressWarnings("unchecked") @BeforeClass public static void setUpTransport() { - nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); + nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), NioTransportPlugin.class)); nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4Plugin.class, NioTransportPlugin.class)); - clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); } private static String getTypeKey(Class clazz) { - if (clazz.equals(MockNioTransportPlugin.class)) { - return MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME; - } else if (clazz.equals(NioTransportPlugin.class)) { + if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_TRANSPORT_NAME; } else { assert clazz.equals(Netty4Plugin.class); @@ -69,7 +63,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { @Override protected Collection> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); + return Arrays.asList(getTestTransportPlugin(), NioTransportPlugin.class); } @Override diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/IndexingPressureRestIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/IndexingPressureRestIT.java index de3358967d12a..f7e3e98075726 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/IndexingPressureRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/IndexingPressureRestIT.java @@ -49,9 +49,16 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { @SuppressWarnings("unchecked") public void testIndexingPressureStats() throws IOException { Request createRequest = new Request("PUT", "/index_name"); - createRequest.setJsonEntity( - "{\"settings\": {\"index\": {\"number_of_shards\": 1, \"number_of_replicas\": 1, " + "\"write.wait_for_active_shards\": 2}}}" - ); + createRequest.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_shards": 1, + "number_of_replicas": 1, + "write.wait_for_active_shards": 2 + } + } + }"""); final Response indexCreatedResponse = getRestClient().performRequest(createRequest); assertThat(indexCreatedResponse.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestGetMappingsCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestGetMappingsCancellationIT.java index ef71486079ad1..423101f6f915c 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestGetMappingsCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestGetMappingsCancellationIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.Response; import 
org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ack.AckedRequest; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -102,7 +103,7 @@ public TimeValue masterNodeTimeout() { public ClusterState execute(ClusterState currentState) throws Exception { return transformationFn.apply(currentState); } - }); + }, ClusterStateTaskExecutor.unbatched()); future.actionGet(); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java index 012939744071b..509ddd648c7b8 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -166,12 +166,12 @@ private static void ensureSearchTaskIsCancelled(String transportAction, Function SetOnce searchTask = new SetOnce<>(); ListTasksResponse listTasksResponse = client().admin().cluster().prepareListTasks().get(); for (TaskInfo task : listTasksResponse.getTasks()) { - if (task.getAction().equals(transportAction)) { + if (task.action().equals(transportAction)) { searchTask.set(task); } } assertNotNull(searchTask.get()); - TaskId taskId = searchTask.get().getTaskId(); + TaskId taskId = searchTask.get().taskId(); String nodeName = nodeIdToName.apply(taskId.getNodeId()); assertBusy(() -> { TaskManager taskManager = internalCluster().getInstance(TransportService.class, nodeName).getTaskManager(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SystemIndexRestIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SystemIndexRestIT.java index f2245b5830aba..9847534c1c042 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SystemIndexRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SystemIndexRestIT.java @@ -14,7 +14,8 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -45,6 +46,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.test.rest.ESRestTestCase.entityAsMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; @@ -92,6 +94,17 @@ public void testSystemIndexAccessBlockedByDefault() throws Exception { // And with a total wildcard assertDeprecationWarningOnAccess(randomFrom("*", "_all"), SystemIndexTestPlugin.SYSTEM_INDEX_NAME); + // If we're not expanding wildcards, we don't get anything + { + Request searchRequest = new Request("GET", "/" + randomFrom("*", "_all") + randomFrom("/_count", "/_search")); + searchRequest.setJsonEntity("{\"query\": {\"match\": {\"some_field\": \"some_value\"}}}"); + searchRequest.addParameter("allow_no_indices", "false"); + + 
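// SearchRestCancellationIT above migrates TaskInfo call sites from getter names
// (getAction, getTaskId) to record-style accessors (action, taskId), consistent with the
// class becoming a record upstream. A hypothetical stand-in showing the call-site shape:
public class RecordAccessorDemo {
    // Illustrative only; not the real org.elasticsearch.tasks.TaskInfo.
    record TaskInfo(String action, long taskId) {}

    public static void main(String[] args) {
        TaskInfo task = new TaskInfo("indices:data/read/search", 42L);
        // Previously: task.getAction() and task.getTaskId().
        System.out.println(task.action() + " #" + task.taskId());
    }
}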
ResponseException exception = expectThrows(ResponseException.class, () -> getRestClient().performRequest(searchRequest)); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + assertThat(exception.getMessage(), containsString("no such index")); + } + // Try to index a doc directly { String expectedWarning = "this request accesses system indices: [" @@ -115,6 +128,7 @@ private void assertDeprecationWarningOnAccess(String queryPattern, String warnin searchRequest.setJsonEntity("{\"query\": {\"match\": {\"some_field\": \"some_value\"}}}"); // Disallow no indices to cause an exception if this resolves to zero indices, so that we're sure it resolved the index searchRequest.addParameter("allow_no_indices", "false"); + searchRequest.addParameter("expand_wildcards", "open,hidden"); searchRequest.setOptions(expectWarnings(expectedWarning)); Response response = getRestClient().performRequest(searchRequest); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java index 24ad25f0ab8cc..3f52de08efe8a 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.http; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestRequest; diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index f74d1c1a57fb6..947929ca915f8 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.sort.SortOrder; @@ -35,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase.assertSnapshotListSorted; import static org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase.matchAllPattern; @@ -46,10 +48,20 @@ // TODO: dry up duplication across this suite and org.elasticsearch.snapshots.GetSnapshotsIT more public class RestGetSnapshotsIT extends AbstractSnapshotRestTestCase { + /** + * Large snapshot pool settings to set up nodes for tests involving multiple repositories that need to have enough + * threads so that blocking some threads on one repository doesn't block other repositories from doing work + */ + private static final Settings LARGE_SNAPSHOT_POOL_SETTINGS = Settings.builder() + .put("thread_pool.snapshot.core", 3) + .put("thread_pool.snapshot.max", 3) + .build(); + @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) + 
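// The SystemIndexRestIT addition above asserts that a wildcard search without wildcard
// expansion now fails instead of silently matching system indices. A minimal sketch of
// that assertion pattern, assuming the ESRestTestCase wiring used throughout this suite:
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.test.rest.ESRestTestCase;

public class WildcardNoExpansionDemo extends ESRestTestCase {
    public void testWildcardWithoutExpansionIs404() throws Exception {
        Request search = new Request("GET", "/*/_search");
        // Fail rather than return an empty result when nothing resolves.
        search.addParameter("allow_no_indices", "false");
        ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(search));
        assertEquals(404, e.getResponse().getStatusLine().getStatusCode());
    }
}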
.put(LARGE_SNAPSHOT_POOL_SETTINGS) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order .build(); } @@ -156,7 +168,6 @@ private void doTestPagination(String repoName, List names, GetSnapshotsR assertNull(batch3LargeLimit.next()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79779") public void testSortAndPaginateWithInProgress() throws Exception { final String repoName = "test-repo"; AbstractSnapshotIntegTestCase.createRepository(logger, repoName, "mock"); @@ -176,7 +187,23 @@ public void testSortAndPaginateWithInProgress() throws Exception { inProgressSnapshots.add(AbstractSnapshotIntegTestCase.startFullSnapshot(logger, repoName, snapshotName, false)); } AbstractSnapshotIntegTestCase.awaitNumberOfSnapshotsInProgress(logger, inProgressCount); - + AbstractSnapshotIntegTestCase.awaitClusterState(logger, state -> { + boolean firstIndexSuccessfullySnapshot = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY) + .asStream() + .flatMap(s -> s.shards().stream()) + .allMatch( + e -> e.getKey().getIndexName().equals("test-index-1") == false + || e.getValue().state() == SnapshotsInProgress.ShardState.SUCCESS + ); + boolean secondIndexIsBlocked = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY) + .asStream() + .flatMap(s -> s.shards().stream()) + .filter(e -> e.getKey().getIndexName().equals("test-index-2")) + .map(e -> e.getValue().state()) + .collect(Collectors.groupingBy(e -> e, Collectors.counting())) + .equals(Map.of(SnapshotsInProgress.ShardState.INIT, 1L, SnapshotsInProgress.ShardState.QUEUED, (long) inProgressCount - 1)); + return firstIndexSuccessfullySnapshot && secondIndexIsBlocked; + }); assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); diff --git a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml index 4dbfc8b9a3f31..c8ad5f18bc7c9 100644 --- a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml +++ b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml @@ -3,8 +3,8 @@ setup: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -14,6 +14,9 @@ setup: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_shards: 10 number_of_replicas: 1 mappings: @@ -65,8 +68,8 @@ setup: --- "index with replicas and shards is green": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: cluster.health: @@ -76,8 +79,8 @@ setup: --- "each shard has unique _tsids": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: diff --git a/qa/system-indices/build.gradle b/qa/system-indices/build.gradle index 94ee6455eddd1..f8262785d0b0c 100644 --- a/qa/system-indices/build.gradle +++ b/qa/system-indices/build.gradle @@ -26,5 +26,5 @@ 
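// The awaitClusterState predicate added to RestGetSnapshotsIT above checks that exactly
// one shard snapshot is INIT and the rest are QUEUED by grouping shard states and
// counting them. The core stream idiom in isolation (enum and data invented here):
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ShardStateCountDemo {
    enum ShardState { INIT, QUEUED, SUCCESS }

    public static void main(String[] args) {
        List<ShardState> states = List.of(ShardState.INIT, ShardState.QUEUED, ShardState.QUEUED);
        // Group identical states and count occurrences per state.
        Map<ShardState, Long> counts = states.stream()
            .collect(Collectors.groupingBy(s -> s, Collectors.counting()));
        // One INIT shard with the rest QUEUED means the repository is blocked as expected.
        System.out.println(counts.equals(Map.of(ShardState.INIT, 1L, ShardState.QUEUED, 2L))); // true
    }
}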
testClusters.configureEach { testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.security.autoconfiguration.enabled', 'false' - user username: 'rest_user', password: 'rest-user-password', role: 'superuser' + user username: 'rest_user', password: 'rest-user-password' } diff --git a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/SystemAliasIT.java b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/SystemAliasIT.java new file mode 100644 index 0000000000000..ec4df990bf93e --- /dev/null +++ b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/SystemAliasIT.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.system.indices; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.After; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class SystemAliasIT extends ESRestTestCase { + static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("rest_user", new SecureString("rest-user-password".toCharArray())); + + @After + public void resetFeatures() throws Exception { + client().performRequest(new Request("POST", "/_features/_reset")); + } + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + public void testCreatingSystemIndexWithAlias() throws Exception { + { + Request request = new Request("PUT", "/.internal-unmanaged-index-8"); + request.setJsonEntity("{\"aliases\": {\".internal-unmanaged-alias\": {}}}"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + } + + public void testCreatingSystemIndexWithLegacyAlias() throws Exception { + { + Request request = new Request("PUT", "/_template/system_template"); + request.setJsonEntity( + "{" + + " \"index_patterns\": [\".internal-unmanaged-*\"]," + + " \"aliases\": {" + + " \".internal-unmanaged-alias\": {}" + + " }" + + "}" + ); + request.setOptions(expectWarnings("Legacy index templates are deprecated in favor of composable templates.")); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + { + Request request = new Request("PUT", "/.internal-unmanaged-index-8"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + 
assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + } + + public void testCreatingSystemIndexWithIndexAliasEndpoint() throws Exception { + { + Request request = new Request("PUT", "/.internal-unmanaged-index-8"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + { + Request request = new Request("PUT", "/.internal-unmanaged-index-8/_alias/.internal-unmanaged-alias"); + request.setOptions( + expectWarnings( + "this request accesses system indices: [.internal-unmanaged-index-8], " + + "but in a future major version, direct access to system indices will be prevented by default" + ) + ); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + } + + public void testCreatingSystemIndexWithAliasEndpoint() throws Exception { + { + Request request = new Request("PUT", "/.internal-unmanaged-index-8"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + { + Request request = new Request("PUT", "/_alias/.internal-unmanaged-alias"); + request.setJsonEntity("{\"index\": \".internal-unmanaged-index-8\"}"); + request.setOptions( + expectWarnings( + "this request accesses system indices: [.internal-unmanaged-index-8], " + + "but in a future major version, direct access to system indices will be prevented by default" + ) + ); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + } + + public void testCreatingSystemIndexWithAliasesEndpoint() throws Exception { + { + Request request = new Request("PUT", "/.internal-unmanaged-index-8"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + { + Request request = new Request("POST", "/_aliases"); + request.setJsonEntity( + "{" + + " \"actions\": [" + + " {" + + " \"add\": {" + + " \"index\": \".internal-unmanaged-index-8\"," + + " \"alias\": \".internal-unmanaged-alias\"" + + " }" + + " }" + + " ]" + + "}" + ); + + request.setOptions( + expectWarnings( + "this request accesses system indices: [.internal-unmanaged-index-8], " + + "but in a future major version, direct access to system indices will be prevented by default" + ) + ); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias"); + } + + @SuppressWarnings("unchecked") + private void assertAliasIsHiddenInIndexResponse(String indexName, String aliasName) throws IOException { + Request request = new Request("GET", "/" + indexName); + request.setOptions( + expectWarnings( + "this request accesses system indices: [" + + indexName + + "], " + + "but in a future major version, direct access to system indices will be prevented by default" + ) + 
); + Response response = client().performRequest(request); + Map responseMap = responseAsMap(response); + Map indexMap = (Map) responseMap.get(indexName); + Map settingsMap = (Map) indexMap.get("settings"); + Map indexSettingsMap = (Map) settingsMap.get("index"); + assertThat(indexSettingsMap.get("hidden"), equalTo("true")); + + Map aliasesMap = (Map) indexMap.get("aliases"); + assertThat(aliasesMap.keySet(), equalTo(Set.of(aliasName))); + Map aliasMap = (Map) aliasesMap.get(aliasName); + assertThat(aliasMap.get("is_hidden"), notNullValue()); + assertThat(aliasMap.get("is_hidden"), equalTo(true)); + } + + @SuppressWarnings("unchecked") + private void assertAliasIsHiddenInAliasesEndpoint(String indexName, String aliasName) throws IOException { + Request request = new Request("GET", "/_aliases"); + request.setOptions( + expectWarnings( + "this request accesses system indices: [" + + indexName + + "], " + + "but in a future major version, direct access to system indices will be prevented by default" + ) + ); + Response response = client().performRequest(request); + Map responseMap = responseAsMap(response); + Map indexAliasMap = (Map) responseMap.get(indexName); + Map aliasesMap = (Map) indexAliasMap.get("aliases"); + Map aliasMap = (Map) aliasesMap.get(aliasName); + assertThat(aliasMap.get("is_hidden"), notNullValue()); + assertThat(aliasMap.get("is_hidden"), equalTo(true)); + } +} diff --git a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java index fcf3177f2c94a..2fc58a9a3736d 100644 --- a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java +++ b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java @@ -12,7 +12,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -45,6 +45,9 @@ public class SystemIndicesQA extends Plugin implements SystemIndexPlugin, ActionPlugin { + private static final String INTERNAL_UNMANAGED_INDEX_NAME = ".internal-unmanaged-index*"; + private static final String INTERNAL_MANAGED_INDEX_NAME = ".internal-managed-index*"; + @Override public String getFeatureName() { return "system indices qa"; @@ -73,6 +76,32 @@ public Collection getSystemIndexDescriptors(Settings sett .setOrigin("net-new") .setVersionMetaKey("version") .setPrimaryIndex(".net-new-system-index-" + Version.CURRENT.major) + .build(), + SystemIndexDescriptor.builder() + .setIndexPattern(INTERNAL_UNMANAGED_INDEX_NAME) + .setDescription("internal unmanaged system index") + .setType(SystemIndexDescriptor.Type.INTERNAL_UNMANAGED) + .setOrigin("qa") + .setVersionMetaKey("version") + .setPrimaryIndex(".internal-unmanaged-index-" + Version.CURRENT.major) + .setAliasName(".internal-unmanaged-alias") + .build(), + SystemIndexDescriptor.builder() + .setIndexPattern(INTERNAL_MANAGED_INDEX_NAME) + .setDescription("internal managed system index") + .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) + .setMappings(mappings()) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .build() + ) + .setOrigin("qa") + .setVersionMetaKey("version") + .setPrimaryIndex(".internal-managed-index-" + Version.CURRENT.major) + .setAliasName(".internal-managed-alias") .build() ); } diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index ab6b81e6f5fd6..b211a5db4111b 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -14,7 +14,7 @@ apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatiple { bwcVersion, baseName -> +BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { version = bwcVersion.toString() setting 'xpack.security.enabled', 'true' diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle deleted file mode 100644 index 7290424b6d8e9..0000000000000 --- a/qa/wildfly/build.gradle +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -import org.elasticsearch.gradle.Architecture -import org.elasticsearch.gradle.VersionProperties -import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER; - -apply plugin: 'war' -apply plugin: 'elasticsearch.java' -apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.internal-distribution-download' - -dependencies { - providedCompile 'javax.enterprise:cdi-api:1.2' - providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.0.Final' - providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.0.Final' - api('org.jboss.resteasy:resteasy-jackson2-provider:3.0.19.Final') { - exclude module: 'jackson-annotations' - exclude module: 'jackson-core' - exclude module: 'jackson-databind' - exclude module: 'jackson-jaxrs-json-provider' - } - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" - api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:${versions.jackson}" - api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" - api "org.apache.logging.log4j:log4j-api:${versions.log4j}" - api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api project(path: ':client:rest-high-level', configuration: 'shadow') - testImplementation project(':test:framework') -} - -tasks.named("war").configure { - archiveFileName = 'example-app.war' -} - -// The wildfly docker image is only available for x86 architectures so disable these tests on arm -if (Architecture.current() == Architecture.X64) { - testFixtures.useFixture() -} - -elasticsearch_distributions { - docker { - type = DOCKER - architecture = Architecture.current() - version = VersionProperties.getElasticsearch() - failIfUnavailable = false // This ensures we skip this testing if Docker is 
unavailable - } -} - -tasks.named("preProcessFixture").configure { - dependsOn "war", elasticsearch_distributions.docker -} - -tasks.register("integTest", Test) { - outputs.doNotCacheIf('Build cache is disabled for Docker tests') { true } - onlyIf { Architecture.current() == Architecture.X64 } - maxParallelForks = '1' - include '**/*IT.class' - systemProperty 'tests.security.manager', 'false' -} - -tasks.named("check").configure { - dependsOn "integTest" -} diff --git a/qa/wildfly/docker-compose.yml b/qa/wildfly/docker-compose.yml deleted file mode 100644 index b1816e1ada57f..0000000000000 --- a/qa/wildfly/docker-compose.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: '3.7' -services: - - wildfly: - image: jboss/wildfly:18.0.1.Final - environment: - JAVA_OPTS: -Delasticsearch.uri=elasticsearch:9200 -Djboss.http.port=8080 -Djava.net.preferIPv4Stack=true - volumes: - - ./build/distributions/example-app.war:/opt/jboss/wildfly/standalone/deployments/example-app.war - ports: - - "8080" - healthcheck: - start_period: 5s - test: ["CMD", "grep", "Admin console listening on", "/opt/jboss/wildfly/standalone/log/server.log"] - interval: 2s - timeout: 1s - retries: 5 - - elasticsearch: - image: elasticsearch:test - environment: - - discovery.type=single-node - - xpack.security.enabled=false - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - ulimits: - memlock: - soft: -1 - hard: -1 - nofile: - soft: 65536 - hard: 65536 - healthcheck: - start_period: 15s - test: ["CMD", "curl", "-f", "-k", "http://localhost:9200"] - interval: 10s - timeout: 2s - retries: 5 diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/model/Employee.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/model/Employee.java deleted file mode 100644 index bb3f37f1072bd..0000000000000 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/model/Employee.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.wildfly.model; - -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.util.List; - -import javax.ws.rs.Consumes; -import javax.ws.rs.core.MediaType; - -@Consumes(MediaType.APPLICATION_JSON) -public class Employee { - - @JsonProperty(value = "first_name") - private String firstName; - - public String getFirstName() { - return firstName; - } - - public void setFirstName(String firstName) { - this.firstName = firstName; - } - - @JsonProperty(value = "last_name") - private String lastName; - - public String getLastName() { - return lastName; - } - - public void setLastName(String lastName) { - this.lastName = lastName; - } - - @JsonProperty(value = "age") - private int age; - - public int getAge() { - return age; - } - - public void setAge(int age) { - this.age = age; - } - - @JsonProperty(value = "about") - private String about; - - public String getAbout() { - return about; - } - - public void setAbout(String about) { - this.about = about; - } - - @JsonProperty(value = "interests") - private List<String> interests; - - public List<String> getInterests() { - return interests; - } - - public void setInterests(List<String> interests) { - this.interests = interests; - } - -} diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java deleted file mode 100644 index e342afb71aca8..0000000000000 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.wildfly.transport; - -import java.util.Collections; -import java.util.Set; - -import javax.ws.rs.ApplicationPath; -import javax.ws.rs.core.Application; - -@ApplicationPath("/transport") -public class RestHighLevelClientActivator extends Application { - - @Override - public Set<Class<?>> getClasses() { - return Collections.singleton(RestHighLevelClientEmployeeResource.class); - } - -} diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java deleted file mode 100644 index ae1f90f32bef5..0000000000000 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1.
- */ - -package org.elasticsearch.wildfly.transport; - -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.wildfly.model.Employee; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -import javax.inject.Inject; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; - -@Path("/employees") -@SuppressWarnings("removal") -public class RestHighLevelClientEmployeeResource { - - @Inject - private RestHighLevelClient client; - - @GET - @Path("/{id}") - @Produces(MediaType.APPLICATION_JSON) - public Response getEmployeeById(final @PathParam("id") Long id) throws IOException { - Objects.requireNonNull(id); - final GetResponse response = client.get(new GetRequest("megacorp", Long.toString(id)), RequestOptions.DEFAULT); - if (response.isExists()) { - final Map<String, Object> source = response.getSource(); - final Employee employee = new Employee(); - employee.setFirstName((String) source.get("first_name")); - employee.setLastName((String) source.get("last_name")); - employee.setAge((Integer) source.get("age")); - employee.setAbout((String) source.get("about")); - @SuppressWarnings("unchecked") - final List<String> interests = (List<String>) source.get("interests"); - employee.setInterests(interests); - return Response.ok(employee).build(); - } else { - return Response.status(Response.Status.NOT_FOUND).build(); - } - } - - @PUT - @Path("/{id}") - @Produces(MediaType.APPLICATION_JSON) - public Response putEmployeeById(final @PathParam("id") Long id, final Employee employee) throws URISyntaxException, IOException { - Objects.requireNonNull(id); - Objects.requireNonNull(employee); - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - { - builder.field("first_name", employee.getFirstName()); - builder.field("last_name", employee.getLastName()); - builder.field("age", employee.getAge()); - builder.field("about", employee.getAbout()); - if (employee.getInterests() != null) { - builder.startArray("interests"); - { - for (final String interest : employee.getInterests()) { - builder.value(interest); - } - } - builder.endArray(); - } - } - builder.endObject(); - final IndexRequest request = new IndexRequest("megacorp"); - request.id(Long.toString(id)); - request.source(builder); - final IndexResponse response = client.index(request, RequestOptions.DEFAULT); - if (response.status().getStatus() == 201) { - return Response.created(new URI("/employees/" + id)).build(); - } else { - return Response.ok().build(); - } - } - } - -} diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java deleted file mode 100644 index dd8cdb193343e..0000000000000 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.wildfly.transport; - -import org.apache.http.HttpHost; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.SuppressForbidden; - -import java.nio.file.Path; - -import javax.enterprise.inject.Produces; - -@SuppressWarnings("unused") -public final class RestHighLevelClientProducer { - - @Produces - @SuppressWarnings("removal") - public RestHighLevelClient createRestHighLevelClient() { - String httpUri = System.getProperty("elasticsearch.uri"); - - return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri))); - } - - @SuppressForbidden(reason = "get path not configured in environment") - private Path getPath(final String elasticsearchProperties) { - return PathUtils.get(elasticsearchProperties); - } -} diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java deleted file mode 100644 index d183f0bf51e22..0000000000000 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.wildfly.transport; - -import org.jboss.resteasy.plugins.providers.jackson.ResteasyJackson2Provider; - -import javax.ws.rs.ext.Provider; - -@Provider -public class RestHighLevelJacksonJsonProvider extends ResteasyJackson2Provider { - -} diff --git a/qa/wildfly/src/main/resources/log4j2.properties b/qa/wildfly/src/main/resources/log4j2.properties deleted file mode 100644 index 46877d0de32a0..0000000000000 --- a/qa/wildfly/src/main/resources/log4j2.properties +++ /dev/null @@ -1,9 +0,0 @@ -status = error - -appender.console.type = Console -appender.console.name = console -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n - -rootLogger.level = info -rootLogger.appenderRef.console.ref = console diff --git a/qa/wildfly/src/main/webapp/WEB-INF/beans.xml b/qa/wildfly/src/main/webapp/WEB-INF/beans.xml deleted file mode 100644 index ef1dc242a0ab1..0000000000000 --- a/qa/wildfly/src/main/webapp/WEB-INF/beans.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - diff --git a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml deleted file mode 100644 index 7191bfe1268aa..0000000000000 --- a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java deleted file mode 100644 index 117481da61f9b..0000000000000 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.wildfly; - -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.util.EntityUtils; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.TestRuleLimitSysouts; -import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; - -@TestRuleLimitSysouts.Limit(bytes = 14000) -public class WildflyIT extends ESTestCase { - - private Logger logger = LogManager.getLogger(WildflyIT.class); - - private String buildBaseUrl() { - final String propertyName = "test.fixtures.wildfly.tcp.8080"; - final String port = System.getProperty(propertyName); - if (port == null) { - throw new IllegalStateException( - "Could not find system property " - + propertyName - + ". This test expects to run with the elasticsearch.test.fixtures Gradle plugin" - ); - } - return "http://localhost:" + port + "/example-app/transport"; - } - - public void testRestClient() throws URISyntaxException, IOException { - final String baseUrl = buildBaseUrl(); - - try (CloseableHttpClient client = HttpClientBuilder.create().build()) { - final String endpoint = baseUrl + "/employees/1"; - logger.info("Connecting to uri: " + baseUrl); - - final HttpPut put = new HttpPut(new URI(endpoint)); - - final String body = "{" - + " \"first_name\": \"John\"," - + " \"last_name\": \"Smith\"," - + " \"age\": 25," - + " \"about\": \"I love to go rock climbing\"," - + " \"interests\": [" - + " \"sports\"," - + " \"music\"" - + " ]" - + "}"; - - put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON)); - try (CloseableHttpResponse response = client.execute(put)) { - int status = response.getStatusLine().getStatusCode(); - assertThat( - "expected a 201 response but got: " + status + " - body: " + EntityUtils.toString(response.getEntity()), - status, - equalTo(201) - ); - } - - logger.info("Fetching resource at " + endpoint); - - final HttpGet get = new HttpGet(new URI(endpoint)); - try ( - CloseableHttpResponse response = client.execute(get); - XContentParser parser = JsonXContent.jsonXContent.createParser( - new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - response.getEntity().getContent() - ) - ) { - final Map<String, Object> map = parser.map(); - assertThat(map.get("first_name"), equalTo("John")); - assertThat(map.get("last_name"), equalTo("Smith")); - assertThat(map.get("age"), equalTo(25)); - assertThat(map.get("about"), equalTo("I love to go rock climbing")); - final Object interests = map.get("interests"); - assertThat(interests, instanceOf(List.class)); - @SuppressWarnings("unchecked") - final List<String> interestsAsList = (List<String>) interests; -
assertThat(interestsAsList, containsInAnyOrder("sports", "music")); - } - } - } - -} diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 0f26777f04ec7..a7d118861ee87 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -49,6 +49,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTestsByFilePattern("**/indices.upgrade/*.yml", "upgrade api will only get a dummy endpoint returning an exception suggesting to use _reindex") task.skipTestsByFilePattern("**/indices.stats/60_field_usage/*/*.yml", "field usage results will be different between lucene versions") + task.skipTest("bulk/11_dynamic_templates/Dynamic templates", "Error message has changed") task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typed index while there is a typeless template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typeless index while there is a typed template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") task.skipTest("delete/70_mix_typeless_typeful/DELETE with typeless API on an index that has types", "Type information about the type is removed and not passed down. The logic to check for this is also removed."); @@ -217,6 +218,8 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> // sync_id is no longer available in SegmentInfos.userData // "indices.flush/10_basic/Index synced flush rest test" task.replaceIsTrue("indices.testing.shards.0.0.commit.user_data.sync_id", "indices.testing.shards.0.0.commit.user_data") + // we can now search using doc values only + task.replaceValueInMatch("fields.object\\.nested1.long.searchable", true) } tasks.register('enforceYamlTestConvention').configure { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 9f2f1e2475850..9cb5febaad3ce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -31,23 +31,6 @@ "description":"Default index for items which don't provide one" } } - }, - { - "path":"/{index}/{type}/_bulk", - "methods":[ - "POST", - "PUT" - ], - "parts":{ - "index":{ - "type":"string", - "description":"Default index for items which don't provide one" - }, - "type":{ - "type":"string", - "description":"Default document type for items which don't provide one" - } - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index 9cdb226f98c1e..8ed4c04917d3a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -28,32 +28,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_create", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 28f12a7d1c26f..7fbc693d069bf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -26,31 +26,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}", - "methods":[ - "DELETE" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 9b57fa6c200eb..cc36acea13492 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -121,18 +121,6 @@ "type":"list", "description":"A comma-separated list of <field>:<direction> pairs" }, - "_source":{ - "type":"list", - "description":"True or false to return the _source field or not, or a list of fields to return" - }, - "_source_excludes":{ - "type":"list", - "description":"A list of fields to exclude from the returned _source field" - }, - "_source_includes":{ - "type":"list", - "description":"A list of fields to extract and return from the _source field" - }, "terminate_after":{ "type":"number", "description":"The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early." diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json index 6748dfd4acf55..c854c44d9d761 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json @@ -5,8 +5,7 @@ "description": "Returns results matching a query expressed in Event Query Language (EQL)" }, "stability": "stable", - "visibility":"feature_flag", - "feature_flag":"es.eql_feature_flag_registered", + "visibility":"public", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index aff6a275dfcbe..3cad1ab5fd7d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -26,31 +26,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_source", - "methods":[ - "HEAD" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document; deprecated and optional starting with 7.0", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json deleted file mode 100644 index 1a3fb54c5ea17..0000000000000 ---
a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "indices.exists_type":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-types-exists.html", - "description":"Returns information about whether a particular document type exists. (DEPRECATED)" - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/{index}/_mapping/{type}", - "methods":[ - "HEAD" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names; use `_all` to check the types across all indices" - }, - "type":{ - "type":"list", - "description":"A comma-separated list of document types to check" - } - } - } - ] - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "default":"open", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index 9afc86ccd3d87..d6ef92551701e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -63,6 +63,11 @@ "only_expunge_deletes":{ "type":"boolean", "description":"Specify whether the operation should only expunge deleted documents" + }, + "wait_for_completion":{ + "type":"boolean", + "default":true, + "description":"Should the request wait until the force merge is completed." 
} } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index 0cd4f66edaba1..faafdfcdb588a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -122,10 +122,6 @@ ], "default":"indices" }, - "types":{ - "type":"list", - "description":"A comma-separated list of document types for the `indexing` index metric" - }, "include_segment_file_sizes":{ "type":"boolean", "description":"Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json index 9dbe40258f12b..44fbfa18a2261 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json @@ -31,28 +31,6 @@ "description":"A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices" } } - }, - { - "path":"/{index}/{type}/_validate/query", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices" - }, - "type":{ - "type":"list", - "description":"A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.forecast.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.forecast.json index 8a88e9048027f..a468741815b12 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.forecast.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.forecast.json @@ -7,7 +7,8 @@ "stability":"stable", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ @@ -41,6 +42,10 @@ "required":false, "description":"The max memory able to be used by the forecast. Default is 20mb." } + }, + "body":{ + "description": "Query parameters can be specified in the body", + "required":false } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_model_snapshot_upgrade_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_model_snapshot_upgrade_stats.json new file mode 100644 index 0000000000000..f20b770501133 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_model_snapshot_upgrade_stats.json @@ -0,0 +1,40 @@ +{ + "ml.get_model_snapshot_upgrade_stats":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html", + "description":"Gets stats for anomaly detection job model snapshot upgrades that are in progress." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_upgrade/_stats", + "methods":[ + "GET" + ], + "parts":{ + "job_id":{ + "type":"string", + "description":"The ID of the job. May be a wildcard, comma separated list or `_all`." + }, + "snapshot_id":{ + "type":"string", + "description":"The ID of the snapshot. May be a wildcard, comma separated list or `_all`." + } + } + } + ] + }, + "params":{ + "allow_no_match":{ + "type":"boolean", + "required":false, + "description":"Whether to ignore if a wildcard expression matches no jobs or no snapshots. (This includes the `_all` string.)" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.open_job.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.open_job.json index 07b9e666e28e2..b93b9cff82568 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.open_job.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.open_job.json @@ -7,7 +7,8 @@ "stability":"stable", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ @@ -24,6 +25,10 @@ } } ] + }, + "body":{ + "description": "Query parameters can be specified in the body", + "required":false } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json index 5db5f91ddc527..b57f1bb69ffa1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection job." }, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json index 30a24b1c6074a..1400da1ccee09 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection detector." 
}, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup_search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup_search.json index 6ad72da006d6c..dfd8c7dec5241 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup_search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup_search.json @@ -24,29 +24,6 @@ "description":"The indices or index-pattern(s) (containing rollup or regular data) that should be searched" } } - }, - { - "path":"/{index}/{type}/_rollup_search", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"The indices or index-pattern(s) (containing rollup or regular data) that should be searched" - }, - "type":{ - "type":"string", - "required":false, - "description":"The doc type inside the index", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.delete_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.delete_transform.json index 0b0e2377bdf30..124efcff17ab2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.delete_transform.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.delete_transform.json @@ -30,7 +30,14 @@ "type":"boolean", "required":false, "description":"When `true`, the transform is deleted regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be deleted." + }, + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the transform deletion" } } } } + + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.preview_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.preview_transform.json index 65cece3bc1f7e..5740f5d9ab2e0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.preview_transform.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.preview_transform.json @@ -34,6 +34,13 @@ } ] }, + "params":{ + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the preview" + } + }, "body":{ "description":"The definition for the transform to preview", "required":false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.put_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.put_transform.json index 1e81629bd72a0..5f4b986d831ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.put_transform.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.put_transform.json @@ -31,6 +31,11 @@ "type":"boolean", "required":false, "description":"If validations should be deferred until transform starts, defaults to false." 
+ }, + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the transform to start" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.reset_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.reset_transform.json index bd4904446c3d8..d86b9290e8dac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.reset_transform.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.reset_transform.json @@ -30,6 +30,11 @@ "type":"boolean", "required":false, "description":"When `true`, the transform is reset regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be reset." + }, + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the transform to reset" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.update_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.update_transform.json index c103570a94437..61734de8bef78 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.update_transform.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.update_transform.json @@ -32,6 +32,11 @@ "type":"boolean", "required":false, "description":"If validations should be deferred until transform starts, defaults to false." + }, + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the update" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.upgrade_transforms.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.upgrade_transforms.json index 116418abdf1b4..0ad1412d06e9e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.upgrade_transforms.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.upgrade_transforms.json @@ -25,6 +25,11 @@ "type":"boolean", "required":false, "description":"Whether to only check for updates but don't execute" + }, + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the upgrade" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 450f20c0a49ad..e588777e990ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -27,31 +27,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_update", - "methods":[ - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 6083eccebd652..8cf44f289ce01 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -125,18 +125,6 @@ "type":"list", "description":"A comma-separated list of : pairs" }, - "_source":{ - "type":"list", - 
"description":"True or false to return the _source field or not, or a list of fields to return" - }, - "_source_excludes":{ - "type":"list", - "description":"A list of fields to exclude from the returned _source field" - }, - "_source_includes":{ - "type":"list", - "description":"A list of fields to extract and return from the _source field" - }, "terminate_after":{ "type":"number", "description":"The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early." diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml index 1560b575e4498..ef904b341deb6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml @@ -1,8 +1,8 @@ --- "Dynamic templates": - skip: - version: " - 7.12.99" - reason: "Dynamic templates parameter is added to bulk requests in 7.13" + version: " - 8.1.0" + reason: "Error message has changed in 8.1.0" - do: indices.create: @@ -166,6 +166,6 @@ - match: { errors: true } - match: { items.0.index.status: 400 } - match: { items.0.index.error.type: mapper_parsing_exception } - - match: { items.0.index.error.reason: "Field [foo] must be an object; but it's configured as [keyword] in dynamic template [string]"} + - match: { items.0.index.error.reason: "failed to parse field [foo] of type [keyword] in document with id 'id_11'. Preview of field's value: '{bar=hello world}'"} - match: { items.1.index.status: 201 } - match: { items.1.index.result: created } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml new file mode 100644 index 0000000000000..9178fab25e230 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml @@ -0,0 +1,25 @@ +--- +"Indexing pressure cluster stats": + - skip: + version: " - 8.0.99" + reason: "indexing_pressure in cluster was added in 8.1" + + - do: + cluster.stats: {} + + - gte: { nodes.indexing_pressure.memory.current.combined_coordinating_and_primary_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.current.coordinating_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.current.primary_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.current.replica_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.current.all_in_bytes: 0 } + + - gte: { nodes.indexing_pressure.memory.total.combined_coordinating_and_primary_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.total.coordinating_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.total.primary_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.total.replica_in_bytes: 0 } + - gte: { nodes.indexing_pressure.memory.total.all_in_bytes: 0 } + + - gte: { nodes.indexing_pressure.memory.total.coordinating_rejections: 0 } + - gte: { nodes.indexing_pressure.memory.total.primary_rejections: 0 } + - gte: { nodes.indexing_pressure.memory.total.replica_rejections: 0 } + - gte: { nodes.indexing_pressure.memory.limit_in_bytes: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.get_features/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.get_features/10_basic.yml index 
f8d08172a0232..55a5c26e5e5cf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.get_features/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.get_features/10_basic.yml @@ -2,7 +2,7 @@ "Get Features": - skip: features: contains - version: " - 7.99.99" # Adjust this after backport + version: " - 7.11.99" reason: "This API was added in 7.12.0" - do: { features.get_features: {}} - contains: {'features': {'name': 'tasks'}} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.reset_features/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.reset_features/10_basic.yml index 5aa33ec5e4255..5bdd6852412a7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.reset_features/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/features.reset_features/10_basic.yml @@ -2,7 +2,7 @@ "Get Features": - skip: features: contains - version: " - 7.99.99" # Adjust this after backport + version: " - 7.12.99" reason: "This API was added in 7.13.0" - do: { features.get_features: {}} - contains: {'features': {'name': 'tasks'}} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml index 87134e3e18d9d..8b9cde1ad6bea 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml @@ -83,6 +83,18 @@ setup: type: long date: type: date + non_indexed_date: + type: date + index: false + non_indexed_keyword: + type: keyword + index: false + non_indexed_boolean: + type: boolean + index: false + non_indexed_ip: + type: ip + index: false geo: type: keyword object: @@ -178,7 +190,6 @@ setup: index: 'test1,test2,test3' fields: object* - - match: {fields.object\.nested1.long.searchable: false} - match: {fields.object\.nested1.long.aggregatable: true} - match: {fields.object\.nested1.long.indices: ["test3"]} - is_false: fields.object\.nested1.long.non_searchable_indices @@ -198,6 +209,67 @@ setup: - match: {fields.object\.nested2.keyword.indices: ["test3"]} - is_false: fields.object\.nested2.keyword.non_aggregatable_indices - is_false: fields.object\.nested2.keyword.non_searchable_indices + +--- +"Field caps for number field with only doc values": + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + - do: + field_caps: + index: 'test1,test2,test3' + fields: object* + + - match: {fields.object\.nested1.long.searchable: true} + +--- +"Field caps for date field with only doc values": + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + - do: + field_caps: + index: 'test1,test2,test3' + fields: non_indexed_date + + - match: {fields.non_indexed_date.date.searchable: true} + +--- +"Field caps for keyword field with only doc values": + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + - do: + field_caps: + index: 'test1,test2,test3' + fields: non_indexed_keyword + + - match: {fields.non_indexed_keyword.keyword.searchable: true} + +--- +"Field caps for boolean field with only doc values": + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + - do: + field_caps: + index: 'test1,test2,test3' + fields: non_indexed_boolean + + - match: {fields.non_indexed_boolean.boolean.searchable: true} + 
+--- +"Field caps for ip field with only doc values": + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + - do: + field_caps: + index: 'test1,test2,test3' + fields: non_indexed_ip + + - match: {fields.non_indexed_ip.ip.searchable: true} + --- "Get object and nested field caps": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml index 79606d7957c6c..ad36eaf872d57 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml @@ -178,7 +178,7 @@ setup: - is_false: fields.metricset.keyword.non_searchable_indices - is_false: fields.metricset.keyword.non_aggregatable_indices - match: {fields.metricset.keyword.non_dimension_indices: ["tsdb_index2"]} - - is_false: fields.metricset.keyword.mertric_conflicts_indices + - is_false: fields.metricset.keyword.metric_conflicts_indices - match: {fields.non_tsdb_field.keyword.searchable: true} - match: {fields.non_tsdb_field.keyword.aggregatable: true} @@ -188,44 +188,44 @@ setup: - is_false: fields.non_tsdb_field.keyword.non_searchable_indices - is_false: fields.non_tsdb_field.keyword.non_aggregatable_indices - is_false: fields.non_tsdb_field.keyword.non_dimension_indices - - is_false: fields.non_tsdb_field.keyword.mertric_conflicts_indices + - is_false: fields.non_tsdb_field.keyword.metric_conflicts_indices - match: {fields.k8s\.pod\.availability_zone.short.time_series_dimension: true} - is_false: fields.k8s\.pod\.availability_zone.short.time_series_metric - is_false: fields.k8s\.pod\.availability_zone.short.non_dimension_indices - - is_false: fields.k8s\.pod\.availability_zone.short.mertric_conflicts_indices + - is_false: fields.k8s\.pod\.availability_zone.short.metric_conflicts_indices - match: {fields.k8s\.pod\.uid.keyword.time_series_dimension: true} - is_false: fields.k8s\.pod\.uid.keyword.time_series_metric - is_false: fields.k8s\.pod\.uid.keyword.non_dimension_indices - - is_false: fields.k8s\.pod\.uid.keyword.mertric_conflicts_indices + - is_false: fields.k8s\.pod\.uid.keyword.metric_conflicts_indices - is_false: fields.k8s\.pod\.name.keyword.time_series_dimension - is_false: fields.k8s\.pod\.name.keyword.time_series_metric - is_false: fields.k8s\.pod\.name.keyword.non_dimension_indices - - is_false: fields.k8s\.pod\.name.keyword.mertric_conflicts_indices + - is_false: fields.k8s\.pod\.name.keyword.metric_conflicts_indices - match: {fields.k8s\.pod\.ip.ip.time_series_dimension: true} - is_false: fields.k8s\.pod\.ip.ip.time_series_metric - is_false: fields.k8s\.pod\.ip.ip.non_dimension_indices - - is_false: fields.k8s\.pod\.ip.ip.mertric_conflicts_indices + - is_false: fields.k8s\.pod\.ip.ip.metric_conflicts_indices - is_false: fields.k8s\.pod\.network\.tx.long.time_series_dimension - is_false: fields.k8s\.pod\.network\.tx.long.time_series_metric - is_false: fields.k8s\.pod\.network\.tx.long.non_dimension_indices - - match: {fields.k8s\.pod\.network\.tx.long.mertric_conflicts_indices: ["tsdb_index1", "tsdb_index2"]} + - match: {fields.k8s\.pod\.network\.tx.long.metric_conflicts_indices: ["tsdb_index1", "tsdb_index2"]} - is_false: fields.k8s\.pod\.network\.rx.integer.time_series_dimension - is_false: fields.k8s\.pod\.network\.rx.integer.time_series_metric - is_false: fields.k8s\.pod\.network\.rx.integer.non_dimension_indices - - match: 
{fields.k8s\.pod\.network\.rx.integer.mertric_conflicts_indices: ["tsdb_index1", "tsdb_index2"]} + - match: {fields.k8s\.pod\.network\.rx.integer.metric_conflicts_indices: ["tsdb_index1", "tsdb_index2"]} - is_false: fields.k8s\.pod\.network\.packets_dropped.long.time_series_dimension - match: {fields.k8s\.pod\.network\.packets_dropped.long.time_series_metric: gauge} - is_false: fields.k8s\.pod\.network\.packets_dropped.long.non_dimension_indices - - is_false: fields.k8s\.pod\.network\.packets_dropped.long.mertric_conflicts_indices + - is_false: fields.k8s\.pod\.network\.packets_dropped.long.metric_conflicts_indices - is_false: fields.k8s\.pod\.network\.latency.double.time_series_dimension - match: {fields.k8s\.pod\.network\.latency.double.time_series_metric: gauge} - is_false: fields.k8s\.pod\.network\.latency.double.non_dimension_indices - - is_false: fields.k8s\.pod\.network\.latency.double.mertric_conflicts_indices + - is_false: fields.k8s\.pod\.network\.latency.double.metric_conflicts_indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index 78962fc7ccb08..97209f3421fff 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -29,3 +29,21 @@ - match: { status: 400 } - match: { error.type: action_request_validation_exception } - match: { error.reason: "Validation Failed: 1: cannot set only_expunge_deletes and max_num_segments at the same time, those two parameters are mutually exclusive;" } + +--- +"Force merge with wait_for_completion parameter": + - skip: + version: " - 8.0.99" + reason: wait_for_completion is introduced since 8.1 + + - do: + indices.create: + index: test_index + + - do: + indices.forcemerge: + max_num_segments: 1 + wait_for_completion: false + - match: { task: '/^\S+:\d+$/' } + + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml index fefa4bb48230b..c4d48933aae73 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml @@ -1,8 +1,8 @@ "Get feature upgrade status": - skip: - version: " - 7.99.99" - reason: "Not yet backported" + version: " - 7.15.99" + reason: "Endpoint added in 7.16.0" - do: migration.get_feature_upgrade_status: {} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/20_post_feature_upgrade.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/20_post_feature_upgrade.yml index fd7e157240dda..c2a99fdcde77d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/20_post_feature_upgrade.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/20_post_feature_upgrade.yml @@ -1,8 +1,8 @@ "Get feature upgrade status": - skip: - version: " - 7.99.99" - reason: "Not yet backported" + version: " - 7.15.99" + reason: "Endpoint added in 7.16.0" - do: migration.get_feature_upgrade_status: {} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml index 4f4b97bbcd521..3c3b4e6dacdf5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml @@ -20,3 +20,48 @@ - gte: { nodes.$node_id.transport.tx_count: 0 } - gte: { nodes.$node_id.transport.rx_size_in_bytes: 0 } - gte: { nodes.$node_id.transport.tx_size_in_bytes: 0 } + +--- +"Transport handling time histogram": + - skip: + version: " - 8.0.99" + reason: "handling_time_histograms were added in 8.1" + features: [arbitrary_key] + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: + metric: [ transport ] + + - length: { nodes.$node_id.transport.inbound_handling_time_histogram: 18 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.0.count: 0 } + - is_false: nodes.$node_id.transport.inbound_handling_time_histogram.0.ge_millis + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.0.lt_millis: 1 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.1.count: 0 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.1.ge_millis: 1 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.1.lt_millis: 2 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.17.count: 0 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.17.ge_millis: 65536 } + - is_false: nodes.$node_id.transport.inbound_handling_time_histogram.17.lt_millis + + + - length: { nodes.$node_id.transport.outbound_handling_time_histogram: 18 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.0.count: 0 } + - is_false: nodes.$node_id.transport.outbound_handling_time_histogram.0.ge_millis + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.0.lt_millis: 1 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.1.count: 0 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.1.ge_millis: 1 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.1.lt_millis: 2 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.17.count: 0 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.17.ge_millis: 65536 } + - is_false: nodes.$node_id.transport.outbound_handling_time_histogram.17.lt_millis diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 7d4ad735fa96d..eb871da38db0b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -1368,3 +1368,143 @@ huge size: - match: { aggregations.str_terms.buckets.1.doc_count: 2 } - match: { aggregations.str_terms.buckets.2.key: c } - match: { aggregations.str_terms.buckets.2.doc_count: 3 } + +--- +Value type mismatch fails shard: + - skip: + version: " - 8.0.99" + reason: "Fixed in 8.1" + + - do: + indices.create: + index: valuetype_test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: keyword + - do: + indices.create: + index: valuetype_test_2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + 
properties: + ip: + type: ip + + - do: + bulk: + index: valuetype_test_1 + refresh: true + body: | + { "index": {} } + { "ip": "192.168.7.1" } + { "index": {} } + { "ip": "192.168.7.2" } + { "index": {} } + { "ip": "192.168.7.3" } + + - do: + bulk: + index: valuetype_test_2 + refresh: true + body: | + { "index": {} } + { "ip": "127.0.0.1" } + { "index": {} } + { "ip": "192.168.0.1" } + { "index": {} } + { "ip": "192.168.0.2" } + { "index": {} } + { "ip": "192.168.0.3" } + - do: + search: + index: valuetype_test_1,valuetype_test_2 + body: + size: 0 + aggs: + str_terms: + terms: + field: ip + value_type: ip + + - match: { _shards.failed: 1 } + - length: { aggregations.str_terms.buckets: 4 } + - match: { aggregations.str_terms.buckets.0.key: "127.0.0.1" } + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + - match: { aggregations.str_terms.buckets.1.key: "192.168.0.1" } + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + - match: { aggregations.str_terms.buckets.2.key: "192.168.0.2" } + - match: { aggregations.str_terms.buckets.2.doc_count: 1 } + - match: { aggregations.str_terms.buckets.3.key: "192.168.0.3" } + - match: { aggregations.str_terms.buckets.3.doc_count: 1 } + +--- +Value type mismatch fails shard with no docs: + - skip: + version: " - 8.0.99" + reason: "Fixed in 8.1" + + - do: + indices.create: + index: valuetype_test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: keyword + - do: + indices.create: + index: valuetype_test_2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: ip + + - do: + bulk: + index: valuetype_test_2 + refresh: true + body: | + { "index": {} } + { "ip": "127.0.0.1" } + { "index": {} } + { "ip": "192.168.0.1" } + { "index": {} } + { "ip": "192.168.0.2" } + { "index": {} } + { "ip": "192.168.0.3" } + - do: + search: + index: valuetype_test_1,valuetype_test_2 + body: + size: 0 + aggs: + str_terms: + terms: + field: ip + value_type: ip + + - match: { _shards.failed: 1 } + - length: { aggregations.str_terms.buckets: 4 } + - match: { aggregations.str_terms.buckets.0.key: "127.0.0.1" } + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + - match: { aggregations.str_terms.buckets.1.key: "192.168.0.1" } + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + - match: { aggregations.str_terms.buckets.2.key: "192.168.0.2" } + - match: { aggregations.str_terms.buckets.2.doc_count: 1 } + - match: { aggregations.str_terms.buckets.3.key: "192.168.0.3" } + - match: { aggregations.str_terms.buckets.3.doc_count: 1 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml index bc89e5d2c963a..03056b4c81aa0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -16,6 +16,18 @@ setup: half_float: type: half_float + - do: + indices.create: + index: date_range_test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + date: + type: date + format: strict_date_time||strict_date + - do: cluster.health: wait_for_status: yellow @@ -35,6 +47,22 @@ setup: - {"index": {}} - {} + - do: + bulk: + index: date_range_test + refresh: true + body: + - { "index": { } } + - { "date": "2021-05-01T07:10:00Z" } + - { "index": { } } + - { "date": 
"2021-05-02T08:34:00Z" } + - { "index": { } } + - { "date": "2021-05-03T08:36:00Z" } + - { "index": { } } + - { "date": "2021-05-04T09:05:00Z" } + - { "index": { } } + - { "date": "2021-05-06T09:22:00Z" } + --- "Float Endpoint Exclusive": - skip: @@ -42,6 +70,7 @@ setup: reason: Bug fixed in 7.16.0 - do: search: + index: test body: size: 0 aggs: @@ -70,6 +99,7 @@ setup: reason: Bug fixed in 7.16.0 - do: search: + index: test body: size: 0 aggs: @@ -91,10 +121,89 @@ setup: - match: { aggregations.double_range.buckets.1.key: "0.0152-1.0" } - match: { aggregations.double_range.buckets.1.doc_count: 2 } +--- +"Float range": + - skip: + version: " - 7.16.99" + reason: Bug fixed in 8.1.0 and backported to 7.17.0 + - do: + search: + index: test + body: + size: 0 + aggs: + float_range: + range: + field: "float" + ranges: + - + to: 6.0 + - + from: 6.0 + to: 10.6 + - + from: 10.6 + + - match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } + - length: { aggregations.float_range.buckets: 3 } + - match: { aggregations.float_range.buckets.0.key: "*-6.0" } + - is_false: aggregations.float_range.buckets.0.from + - match: { aggregations.float_range.buckets.0.to: 6.0 } + - match: { aggregations.float_range.buckets.0.doc_count: 3 } + - match: { aggregations.float_range.buckets.1.key: "6.0-10.6" } + - match: { aggregations.float_range.buckets.1.from: 6.0 } + - match: { aggregations.float_range.buckets.1.to: 10.6 } + - match: { aggregations.float_range.buckets.1.doc_count: 0 } + - match: { aggregations.float_range.buckets.2.key: "10.6-*" } + - match: { aggregations.float_range.buckets.2.from: 10.6 } + - is_false: aggregations.float_range.buckets.2.to + - match: { aggregations.float_range.buckets.2.doc_count: 0 } + --- "Double range": + - skip: + version: " - 7.16.99" + reason: Bug fixed in 8.1.0 and backported to 7.17.0 + - do: + search: + index: test + body: + size: 0 + aggs: + float_range: + range: + field: "double" + ranges: + - + to: 6.0 + - + from: 6.0 + to: 10.6 + - + from: 10.6 + + - match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } + - length: { aggregations.float_range.buckets: 3 } + - match: { aggregations.float_range.buckets.0.key: "*-6.0" } + - is_false: aggregations.float_range.buckets.0.from + - match: { aggregations.float_range.buckets.0.to: 6.0 } + - match: { aggregations.float_range.buckets.0.doc_count: 0 } + - match: { aggregations.float_range.buckets.1.key: "6.0-10.6" } + - match: { aggregations.float_range.buckets.1.from: 6.0 } + - match: { aggregations.float_range.buckets.1.to: 10.6 } + - match: { aggregations.float_range.buckets.1.doc_count: 0 } + - match: { aggregations.float_range.buckets.2.key: "10.6-*" } + - match: { aggregations.float_range.buckets.2.from: 10.6 } + - is_false: aggregations.float_range.buckets.2.to + - match: { aggregations.float_range.buckets.2.doc_count: 3 } + +--- +"Double range no decimal": - do: search: + index: test body: size: 0 aggs: @@ -130,6 +239,7 @@ setup: "Double range with missing value": - do: search: + index: test body: size: 0 aggs: @@ -166,6 +276,7 @@ setup: "Null to and from": - do: search: + index: test body: size: 0 aggs: @@ -203,6 +314,7 @@ setup: "Range agg on long field": - do: search: + index: test body: size: 0 aggs: @@ -238,6 +350,7 @@ setup: "Double range default keyed response": - do: search: + index: test body: size: 0 aggs: @@ -269,3 +382,30 @@ setup: - match: { aggregations.double_range.buckets.last.from: 150.0 } - is_false: aggregations.double_range.buckets.last.to - match: { 
aggregations.double_range.buckets.last.doc_count: 0 } + +--- +"Range aggregation on date field": + - skip: + version: " - 7.16.99" + reason: Fixed in 8.1.0 and backported to 7.17.0 + + - do: + search: + index: date_range_test + body: + size: 0 + aggs: + date_range: + range: + field: date + ranges: + { from: 2021-05-01T00:00:00Z, to: 2021-05-05T00:00:00Z } + + - match: { hits.total.value: 5 } + - length: { aggregations.date_range.buckets: 1 } + - match: { aggregations.date_range.buckets.0.doc_count: 4 } + - match: { aggregations.date_range.buckets.0.key: "2021-05-01T00:00:00.000Z-2021-05-05T00:00:00.000Z" } + - match: { aggregations.date_range.buckets.0.from: 1619827200000 } + - match: { aggregations.date_range.buckets.0.from_as_string: "2021-05-01T00:00:00.000Z" } + - match: { aggregations.date_range.buckets.0.to: 1620172800000 } + - match: { aggregations.date_range.buckets.0.to_as_string: "2021-05-05T00:00:00.000Z" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/440_weighted_avg.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/440_weighted_avg.yml new file mode 100644 index 0000000000000..2ffc7eaf74625 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/440_weighted_avg.yml @@ -0,0 +1,199 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + double_field: + type: double + long_field: + type: long + integer_field: + type: integer + multi_value_field: + type: integer + weight: + type: integer + + - do: + bulk: + refresh: true + index: test + body: + - '{ "index": {} }' + - '{ "double_field": 1.0, "long_field": 6, "integer_field": 10, "multi_value_field": [1, 2, 3], "weight": 2 }' + - '{ "index": {} }' + - '{ "double_field": 3.0, "long_field": 2, "multi_value_field": [0, 1, 4], "weight": 4 }' + - '{ "index": {} }' + - '{ "double_field": 1.0, "long_field": 1 }' + - '{ "index": {} }' + - '{ "double_field": 5.0, "long_field": 3, "integer_field": 4, "weight": 3 }' + +--- +"Basic test": + - skip: + features: close_to + - do: + search: + body: + aggs: + weighted_long_avg: + weighted_avg: + value: + field: long_field + weight: + field: weight + weighted_double_avg: + weighted_avg: + value: + field: double_field + weight: + field: weight + weighted_integer_avg: + weighted_avg: + value: + field: integer_field + weight: + field: weight + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - close_to: { aggregations.weighted_long_avg.value: { value: 3.222222, error: 0.000001 } } + - close_to: { aggregations.weighted_double_avg.value: { value: 3.222222, error: 0.000001 } } + - close_to: { aggregations.weighted_integer_avg.value: { value: 6.400000, error: 0.000001 } } + +--- +"Value with explicit missing configuration": + - skip: + features: close_to + - do: + search: + body: + aggs: + weighted_integer_avg: + weighted_avg: + value: + field: integer_field + missing: 2 + weight: + field: weight + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - close_to: { aggregations.weighted_integer_avg.value: { value: 4.444444, error: 0.000001 } } + +--- +"Weight with explicit missing configuration": + - skip: + features: close_to + - do: + search: + body: + aggs: + weighted_long_avg: + weighted_avg: + value: + field: long_field + weight: + field: weight + missing: 4 + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - close_to: { 
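aggregations.weighted_long_avg.value: { value: 2.538461, error: 0.000001 } } + # For reference, with the setup docs above: a doc without a weight is normally skipped, so the basic case is (6*2 + 2*4 + 3*3) / (2+4+3) = 29/9 = 3.2222, while weight missing: 4 keeps it, giving (6*2 + 2*4 + 1*4 + 3*3) / (2+4+4+3) = 33/13 = 2.5385 as asserted here.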
+ +--- +"Missing value results in skipping document": + - skip: + features: close_to + - do: + search: + body: + aggs: + weighted_integer_avg: + weighted_avg: + value: + field: integer_field + weight: + field: weight + missing: 1 + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - close_to: { aggregations.weighted_integer_avg.value: { value: 6.400000, error: 0.000001 } } + +--- +"Missing weight results in skipping document": + - skip: + features: close_to + - do: + search: + body: + aggs: + weighted_long_avg: + weighted_avg: + value: + field: long_field + weight: + field: weight + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - close_to: { aggregations.weighted_long_avg.value: { value: 3.222222, error: 0.000001 } } + +--- +"Sum of weights equal to zero results in null weighted average": + - do: + search: + body: + aggs: + weighted_integer_avg: + weighted_avg: + value: + field: integer_field + weight: + field: unknown + missing: 0 + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { aggregations.weighted_integer_avg.value: null } + +--- +"Multi value field": + - skip: + features: close_to + - do: + search: + body: + aggs: + weighted_multi_value_avg: + weighted_avg: + value: + field: multi_value_field + weight: + field: weight + + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - close_to: { aggregations.weighted_multi_value_avg.value: { value: 1.777777, error: 0.000001 } } + +--- +"Multi weight field not allowed": + - skip: + features: close_to + - do: + catch: request + search: + body: + aggs: + weighted_multi_weight_avg: + weighted_avg: + value: + field: integer_field + weight: + field: multi_value_field diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_time_series.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_time_series.yml new file mode 100644 index 0000000000000..a9284120ebddc --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_time_series.yml @@ -0,0 +1,81 @@ +setup: + - skip: + version: " - 8.0.99" + reason: Support for time_series aggs was added in 8.1.0 + + - do: + indices.create: + index: tsdb + body: + settings: + number_of_replicas: 0 + mode: time_series + routing_path: [key] + time_series: + start_time: "2021-01-01T00:00:00Z" + end_time: "2022-01-01T00:00:00Z" + mappings: + properties: + key: + type: keyword + time_series_dimension: true + "@timestamp": + type: date + + - do: + cluster.health: + wait_for_status: green + + - do: + bulk: + index: tsdb + refresh: true + body: + - '{ "index": {} }' + - '{ "key": "bar", "val": 2, "@timestamp": "2021-01-01T00:00:10Z" }' + - '{ "index": {}}' + - '{ "key": "bar", "val": 10, "@timestamp": "2021-01-01T00:00:00Z" }' + - '{ "index": {}}' + - '{ "key": "bar", "val": 50, "@timestamp": "2021-01-01T00:00:30Z" }' + - '{ "index": {}}' + - '{ "key": "bar", "val": 40, "@timestamp": "2021-01-01T00:00:20Z" }' + + # Let's try to create another segment to make things a bit more interesting + - do: + bulk: + index: tsdb + refresh: true + body: + - '{ "index": {} }' + - '{ "key": "foo", "val": 20, "@timestamp": "2021-01-01T00:00:00Z" }' + - '{ "create": {} }' + - '{ "key": "foo", "val": 30, "@timestamp": "2021-01-01T00:10:00Z" }' + - '{ "index": {} }' + - '{ "key": "baz", "val": 20, "@timestamp": "2021-01-01T00:00:00Z" }'
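+ # Every distinct value of the "key" dimension hashes to its own _tsid, so these two bulk requests should produce three separate series: bar, foo and baz. + - '{ "index": {} }' + - '{ "key": 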
"baz", "val": 20, "@timestamp": "2021-01-01T00:00:00" }' + +--- +"Basic test": + - do: + search: + index: tsdb + body: + query: + range: + "@timestamp": + gte: "2021-01-01T00:10:00Z" + size: 0 + aggs: + ts: + time_series: + keyed: false + + + + - match: { hits.total.value: 1 } + - length: { aggregations.ts.buckets: 1 } + + - match: { aggregations.ts.buckets.0.key: { "key": "foo" } } + - match: { aggregations.ts.buckets.0.doc_count: 1 } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index 15cea782c4679..c6509546ca94b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -10,6 +10,10 @@ setup: created_at: type: date format: "yyyy-MM-dd" + created_at_not_indexed: + type: date + index: false + format: "yyyy-MM-dd" - do: indices.create: index: index_2 @@ -21,6 +25,10 @@ setup: created_at: type: date_nanos format: "yyyy-MM-dd" + created_at_not_indexed: + type: date + index: false + format: "yyyy-MM-dd" - do: indices.create: index: index_3 @@ -32,6 +40,10 @@ setup: created_at: type: date format: "yyyy-MM-dd" + created_at_not_indexed: + type: date + index: false + format: "yyyy-MM-dd" --- @@ -222,3 +234,53 @@ setup: - length: { hits.hits: 1 } - match: {hits.hits.0._id: "3" } - length: { aggregations.idx_terms.buckets: 3 } + +--- +"prefilter on non-indexed date fields": + - skip: + version: "- 8.0.99" + reason: "doc values search was added in 8.1.0" + + - do: + index: + index: index_1 + id: 1 + body: { "created_at_not_indexed": "2016-01-01"} + - do: + index: + index: index_2 + id: 2 + body: { "created_at_not_indexed": "2017-01-01" } + + - do: + index: + index: index_3 + id: 3 + body: { "created_at_not_indexed": "2018-01-01" } + - do: + indices.refresh: {} + + + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "query" : { "range" : { "created_at_not_indexed" : { "gte" : "2016-02-01", "lt": "2018-02-01"} } } } + + - match: { _shards.total: 3 } + - match: { _shards.successful: 3 } + - match: { _shards.skipped: 0 } + - match: { _shards.failed: 0 } + - match: { hits.total: 2 } + + # this is a case where we would normally skip due to rewrite but we can't because we only have doc values + - do: + search: + rest_total_hits_as_int: true + pre_filter_shard_size: 1 + body: { "size" : 0, "query" : { "range" : { "created_at_not_indexed" : { "gte" : "2016-02-01", "lt": "2018-02-01"} } } } + + - match: { _shards.total: 3 } + - match: { _shards.successful: 3 } + - match: { _shards.skipped : 0 } + - match: { _shards.failed: 0 } + - match: { hits.total: 2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml new file mode 100644 index 0000000000000..323c521f4d128 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml @@ -0,0 +1,318 @@ +--- +setup: + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + + - do: + indices.create: + index: test + body: + mappings: + dynamic: false + properties: + byte: + type: byte + index: false + double: + type: double + index: false + float: + type: float + index: false + half_float: + type: 
half_float + index: false + integer: + type: integer + index: false + long: + type: long + index: false + short: + type: short + index: false + date: + type: date + format: yyyy/MM/dd + index: false + keyword: + type: keyword + index: false + boolean: + type: boolean + index: false + ip: + type: ip + index: false + + - do: + index: + index: test + id: 1 + body: + byte : 1 + double : 1.0 + float : 1.0 + half_float: 1.0 + integer: 1 + long: 1 + short: 1 + date: "2017/01/01" + keyword: "key1" + boolean: "false" + ip: "192.168.0.1" + + - do: + index: + index: test + id: 2 + body: + byte : 2 + double : 2.0 + float : 2.0 + half_float: 2.0 + integer: 2 + long: 2 + short: 2 + date: "2017/01/02" + keyword: "key2" + boolean: "true" + ip: "192.168.0.2" + + - do: + indices.refresh: {} + +--- +"Test match queries on number fields where only doc values are enabled": + + - do: + search: + index: test + body: { query: { match: { byte: { query: 1 } } } } + - length: { hits.hits: 1 } + + - do: + search: + index: test + body: { query: { match: { double: { query: 1.0 } } } } + - length: { hits.hits: 1 } + + - do: + search: + index: test + body: { query: { match: { float: { query: 1.0 } } } } + - length: { hits.hits: 1 } + + - do: + search: + index: test + body: { query: { match: { half_float: { query: 1.0 } } } } + - length: { hits.hits: 1 } + + - do: + search: + index: test + body: { query: { match: { integer: { query: 1 } } } } + - length: { hits.hits: 1 } + + - do: + search: + index: test + body: { query: { match: { long: { query: 1 } } } } + - length: { hits.hits: 1 } + + - do: + search: + index: test + body: { query: { match: { short: { query: 1 } } } } + - length: { hits.hits: 1 } + +--- +"Test terms queries on number fields where only doc values are enabled": + + - do: + search: + index: test + body: { query: { terms: { byte: [ 1, 2 ] } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { terms: { double: [ 1.0, 2.0 ] } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { terms: { float: [ 1.0, 2.0 ] } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { terms: { half_float: [ 1.0, 2.0 ] } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { terms: { integer: [ 1, 2 ] } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { terms: { long: [ 1, 2 ] } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { terms: { short: [ 1, 2 ] } } } + - length: { hits.hits: 2 } + +--- +"Test range queries on number fields where only doc values are enabled": + + - do: + search: + index: test + body: { query: { range: { byte: { gte: 0 } } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { range: { double: { gte: 0.0 } } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { range: { float: { gte: 0.0 } } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { range: { half_float: { gte: 0.0 } } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { range: { integer: { gte: 0 } } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { range: { long: { gte: 0 } } } } + - length: { hits.hits: 2 } + + - do: + search: + index: test + body: { query: { range: { short: { gte: 0 } } } } + - length: { hits.hits: 2 } + +--- +"Test match query on date field where only doc values are 
enabled": + + - do: + search: + index: test + body: { query: { match: { date: { query: "2017/01/01" } } } } + - length: { hits.hits: 1 } + +--- +"Test range query on date field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { range: { date: { gte: "2017/01/01" } } } } + - length: { hits.hits: 2 } + +--- +"Test match query on keyword field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { match: { keyword: { query: "key1" } } } } + - length: { hits.hits: 1 } + +--- +"Test terms query on keyword field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { terms: { keyword: [ "key1", "key2" ] } } } + - length: { hits.hits: 2 } + +--- +"Test range query on keyword field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { range: { keyword: { gte: "key1" } } } } + - length: { hits.hits: 2 } + +--- +"Test match query on boolean field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { match: { boolean: { query: "false" } } } } + - length: { hits.hits: 1 } + +--- +"Test terms query on boolean field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { terms: { boolean: [ "false", "true" ] } } } + - length: { hits.hits: 2 } + +--- +"Test range query on boolean field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { range: { boolean: { gte: "false" } } } } + - length: { hits.hits: 2 } + +--- +"Test match query on ip field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { match: { ip: { query: "192.168.0.1" } } } } + - length: { hits.hits: 1 } + +--- +"Test terms query on ip field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { terms: { ip: [ "192.168.0.1", "192.168.0.2" ] } } } + - length: { hits.hits: 2 } + +--- +"Test range query on ip field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { range: { ip: { gte: "192.168.0.1" } } } } + - length: { hits.hits: 2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml index ec67748212a5c..0e22f086096ff 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml @@ -178,3 +178,39 @@ can't shadow metrics: runtime_mappings: deep.deeper.deepest: type: keyword + +--- +# Test that _tsid field is not added if an index is not a time-series index +no _tsid in standard indices: + - skip: + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + + - do: + field_caps: + index: test + fields: [metricset, _tsid] + + - match: {fields.metricset.keyword.searchable: true} + - match: {fields.metricset.keyword.aggregatable: true} + - match: {fields.metricset.keyword.time_series_dimension: true} + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - 
is_false: fields.metricset.keyword.non_aggregatable_indices + - is_false: fields._tsid # _tsid metadata field must not exist in non-time-series indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/100_composite.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/100_composite.yml new file mode 100644 index 0000000000000..03808bd7cb0f3 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/100_composite.yml @@ -0,0 +1,154 @@ +setup: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +composite aggregation on tsid: + - skip: + version: " - 8.0.99" + reason: _tsid introduced in 8.1.0 + + - do: + search: + index: test + body: + size: 0 + aggregations: + tsids: + composite: + sources: [ + "tsid": { + "terms": { + "field": "_tsid" + } + }, + "date": { + "date_histogram": { + "field": "@timestamp", + "calendar_interval": "1m" + } + } + ] + + - match: { hits.total.value: 8 } + - length: { aggregations.tsids.buckets: 4 } + - match: { 
aggregations.tsids.buckets.0.key.tsid.k8s\.pod\.uid: "947e4ced-1786-4e53-9e0c-5c447e959507" } + - match: { aggregations.tsids.buckets.0.key.tsid.metricset: "pod" } + - match: { aggregations.tsids.buckets.0.key.date: 1619635800000} + - match: { aggregations.tsids.buckets.0.doc_count: 3 } + - match: { aggregations.tsids.buckets.1.key.tsid.k8s\.pod\.uid: "947e4ced-1786-4e53-9e0c-5c447e959507" } + - match: { aggregations.tsids.buckets.1.key.tsid.metricset: "pod" } + - match: { aggregations.tsids.buckets.1.key.date: 1619635860000} + - match: { aggregations.tsids.buckets.1.doc_count: 1 } + - match: { aggregations.tsids.buckets.2.key.tsid.k8s\.pod\.uid: "df3145b3-0563-4d3b-a0f7-897eb2876ea9" } + - match: { aggregations.tsids.buckets.2.key.tsid.metricset: "pod" } + - match: { aggregations.tsids.buckets.2.key.date: 1619635800000} + - match: { aggregations.tsids.buckets.2.doc_count: 3 } + - match: { aggregations.tsids.buckets.3.key.tsid.k8s\.pod\.uid: "df3145b3-0563-4d3b-a0f7-897eb2876ea9" } + - match: { aggregations.tsids.buckets.3.key.tsid.metricset: "pod" } + - match: { aggregations.tsids.buckets.3.key.date: 1619635860000} + - match: { aggregations.tsids.buckets.3.doc_count: 1 } + - match: { aggregations.tsids.after_key.tsid.k8s\.pod\.uid: "df3145b3-0563-4d3b-a0f7-897eb2876ea9" } + - match: { aggregations.tsids.after_key.tsid.metricset: "pod" } + - match: { aggregations.tsids.after_key.date: 1619635860000} + +--- +composite aggregation on tsid with after: + - skip: + version: " - 8.0.99" + reason: _tsid introduced in 8.1.0 + + - do: + search: + index: test + body: + size: 0 + aggregations: + tsids: + composite: + sources: [ + "tsid": { + "terms": { + "field": "_tsid" + } + }, + "date": { + "date_histogram": { + "field": "@timestamp", + "calendar_interval": "1m" + } + } + ] + after: { + tsid: { k8s.pod.uid: "df3145b3-0563-4d3b-a0f7-897eb2876ea9", metricset: "pod" }, + date: 1619635800000 + } + + - match: { hits.total.value: 8 } + - length: { aggregations.tsids.buckets: 1 } + - match: { aggregations.tsids.buckets.0.key.tsid.k8s\.pod\.uid: "df3145b3-0563-4d3b-a0f7-897eb2876ea9" } + - match: { aggregations.tsids.buckets.0.key.tsid.metricset: "pod" } + - match: { aggregations.tsids.buckets.0.key.date: 1619635860000} + - match: { aggregations.tsids.buckets.0.doc_count: 1 } + - match: { aggregations.tsids.after_key.tsid.k8s\.pod\.uid: "df3145b3-0563-4d3b-a0f7-897eb2876ea9" } + - match: { aggregations.tsids.after_key.tsid.metricset: "pod" } + - match: { aggregations.tsids.after_key.date: 1619635860000} + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index 4df1579cd4597..073164e8ac6c5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -1,7 +1,7 @@ enable: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -11,6 +11,9 @@ enable: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -41,8 +44,8 @@ enable: --- no sort field: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.mode=time_series\] is incompatible with 
\[index.sort.field\]/ @@ -52,13 +55,17 @@ no sort field: settings: index: mode: time_series + routing_path: foo + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z sort.field: ['a'] --- no sort order: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.mode=time_series\] is incompatible with \[index.sort.order\]/ @@ -68,13 +75,17 @@ no sort order: settings: index: mode: time_series + routing_path: foo + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z sort.order: ['DESC'] --- no sort mode: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.mode=time_series\] is incompatible with \[index.sort.mode\]/ @@ -84,13 +95,17 @@ no sort mode: settings: index: mode: time_series + routing_path: foo + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z sort.mode: ['MIN'] --- no sort missing: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.mode=time_series\] is incompatible with \[index.sort.missing\]/ @@ -100,13 +115,17 @@ no sort missing: settings: index: mode: time_series + routing_path: foo + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z sort.missing: ['_last'] --- no partitioning: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.mode=time_series\] is incompatible with \[index.routing_partition_size\]/ @@ -116,14 +135,18 @@ no partitioning: settings: index: mode: time_series + routing_path: foo + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z shards: 5 routing_partition_size: 2 --- routing_path required: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.mode=time_series\] requires \[index.routing_path\]/ @@ -133,13 +156,16 @@ routing_path required: settings: index: mode: time_series - shards: 5 + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + number_of_shards: 5 --- routing_path is not allowed in standard mode: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /\[index.routing_path\] requires \[index.mode=time_series\]/ @@ -153,8 +179,8 @@ routing_path is not allowed in standard mode: --- routing required: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /routing is forbidden on CRUD operations that target indices in \[index.mode=time_series\]/ @@ -165,6 +191,9 @@ routing required: index: mode: time_series routing_path: foo + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: _routing: required: true @@ -184,6 +213,13 @@ set start_time and end_time: time_series: start_time: 1632625782000 end_time: 1632625792000 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true - do: indices.put_settings: @@ -202,6 +238,23 @@ set start_time and end_time: time_series: end_time: 1632625792000 + - do: + index: + refresh: true + index: test_index + body: { + 
"@timestamp": 1632625792000, + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:52.000Z" ] } + - do: indices.delete: index: test_index @@ -230,3 +283,202 @@ set start_time and end_time without timeseries mode: index: time_series: end_time: 1632625782000 + +--- +set bad start_time and end_time: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [ metricset ] + time_series: + start_time: 1632625782000 + end_time: 1632625792000 + mappings: + properties: + metricset: + type: keyword + time_series_dimension: true + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:41Z\] must be larger than 2021-09-26T03:09:42Z/ + index: + index: test_index + body: { + "@timestamp": 1632625781000, + "metricset": "pod" + } + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:53Z\] must be smaller than 2021-09-26T03:09:52Z/ + index: + index: test_index + body: { + "@timestamp": 1632625793000, + "metricset": "pod" + } + +--- +check start_time and end_time with data_nano: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-09-26T03:09:42Z + end_time: 2021-09-26T03:09:52Z + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:51.123456789Z", + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:51.123456789Z" ] } + + - do: + catch: /time series index @timestamp value \[2010-09-26T03:09:52.123456789Z\] must be larger than 2021-09-26T03:09:42Z/ + index: + index: test_index + body: { + "@timestamp": "2010-09-26T03:09:52.123456789Z", + "metricset": "pod" + } + + - do: + catch: /time series index @timestamp value \[2031-09-26T03:09:52.123456789Z\] must be smaller than 2021-09-26T03:09:52Z/ + index: + index: test_index + body: { + "@timestamp": "2031-09-26T03:09:52.123456789Z", + "metricset": "pod" + } + +--- +check start_time boundary with data_nano: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-09-26T03:09:42Z + end_time: 2021-09-26T03:09:52Z + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:42.123456789Z", + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:42.123456789Z" ] } + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:41.123456789Z\] must be larger than 2021-09-26T03:09:42Z/ + index: + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:41.123456789Z", + "metricset": "pod" + } + +--- +check 
end_time boundary with date_nanos: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-09-26T03:09:42Z + end_time: 2021-09-26T03:09:52Z + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:51.123456789Z", + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:51.123456789Z" ] } + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:52.123456789Z\] must be smaller than 2021-09-26T03:09:52Z/ + index: + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:52.123456789Z", + "metricset": "pod" + } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml index 9d61d4c359b6d..76fb0282df902 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml @@ -2,8 +2,8 @@ --- date: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -13,6 +13,9 @@ date: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -48,8 +51,8 @@ date: --- date_nanos: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -59,6 +62,9 @@ date_nanos: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -94,8 +100,8 @@ date_nanos: --- automatically add with date: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -105,6 +111,9 @@ automatically add with date: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -138,8 +147,8 @@ automatically add with date: --- reject @timestamp with wrong type: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: /data stream timestamp field \[@timestamp\] is of type \[keyword\], but \[date,date_nanos\] is expected/ @@ -150,6 +159,9 @@ reject @timestamp with wrong type: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -160,11 +172,11 @@ reject @timestamp with wrong type: --- reject timestamp meta field with wrong type: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + version: " - 8.0.99" + reason: introduced 
in 8.1.0 - do: - catch: /.* time series index \[_data_stream_timestamp\] meta field must be enabled/ + catch: /\[_data_stream_timestamp\] meta field has been disabled/ indices.create: index: test body: @@ -172,8 +184,158 @@ reject timestamp meta field with wrong type: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: _data_stream_timestamp: enabled: false + +--- +enable timestamp meta field: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + number_of_replicas: 0 + number_of_shards: 2 + mappings: + _data_stream_timestamp: + enabled: true + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + +--- +reject bad timestamp meta field: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + catch: /\[_data_stream_timestamp\] config must be an object/ + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + number_of_replicas: 0 + number_of_shards: 2 + mappings: + _data_stream_timestamp: enabled + +--- +write without timestamp: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + + - do: + catch: /data stream timestamp field \[@timestamp\] is missing/ + index: + index: test + body: + "metricset": "pod" + +--- +explicitly enable timestamp meta field: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + meta: + field_meta: time_series + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test + body: { + "@timestamp": 2021-04-28T18:50:53.142Z, + "metricset": "pod", + "new_field" : "value" + } + + - do: + search: + index: test + body: + docvalue_fields: [ '@timestamp', 'new_field.keyword' ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.@timestamp: [ "2021-04-28T18:50:53.142Z" ] } + - match: { hits.hits.0.fields.new_field\.keyword: [ "value" ] } + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { "test.mappings.properties.@timestamp.meta.field_meta": time_series } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index c7b8b97b32ff4..c01fa5f24de44 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -1,7 +1,7 @@ ecs style: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -11,6 +11,9 @@ ecs style: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -53,8 +56,8 @@ ecs style: --- top level dim object: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -64,6 +67,9 @@ top level dim object: index: mode: time_series routing_path: [dim.*] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -108,8 +114,8 @@ top level dim object: --- non keyword matches routing_path: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: catch: '/All fields that match routing_path must be keywords with \[time_series_dimension: true\] and without the \[script\] parameter. \[@timestamp\] was \[date\]./' @@ -120,6 +126,9 @@ non keyword matches routing_path: index: mode: time_series routing_path: [metricset, k8s.pod.uid, "@timestamp"] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -150,8 +159,8 @@ non keyword matches routing_path: --- runtime field matching routing path: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -161,6 +170,9 @@ runtime field matching routing path: index: mode: time_series routing_path: [dim.*] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -189,8 +201,8 @@ runtime field matching routing path: --- "dynamic: runtime matches routing_path": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -200,10 +212,16 @@ runtime field matching routing path: index: mode: time_series routing_path: [dim.*] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": type: date + dim_kw: + type: "keyword" + time_series_dimension: true dim: type: object dynamic: runtime @@ -214,14 +232,14 @@ runtime field matching routing path: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": "a"}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim_kw": "dim", "dim": {"foo": "a"}}' - match: {items.0.index.error.reason: "All fields that match routing_path must be keywords with [time_series_dimension: true] and without the [script] parameter. 
[dim.foo] was a runtime [keyword]."} --- "dynamic: false matches routing_path": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -231,6 +249,9 @@ runtime field matching routing path: index: mode: time_series routing_path: [dim.*] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml index e606e4dd82ca2..48986cfe82d74 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml @@ -17,8 +17,8 @@ teardown: --- "Create a snapshot and then restore it": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 features: ["allowed_warnings"] # Create index @@ -30,6 +30,9 @@ teardown: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z number_of_replicas: 0 number_of_shards: 2 mappings: @@ -134,10 +137,13 @@ teardown: search: index: test_index body: + fields: + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - match: {hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507} - # TODO assert the _tsid once we generate it + - match: {hits.hits.0.fields._tsid: [ { k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod } ] } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index 223d87ab96a09..e3b2614cefc95 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -11,6 +11,9 @@ setup: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -60,8 +63,8 @@ setup: --- query a dimension: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -76,8 +79,8 @@ query a dimension: --- query a metric: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -90,13 +93,26 @@ query a metric: - match: {hits.total.value: 1} -# TODO add test showing that quering _tsid fails +--- +"query tsid fails": + - skip: + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 + + - do: + catch: /\[_tsid\] is not searchable/ + search: + index: test + body: + query: + term: + _tsid: won't work --- fetch a dimension: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -114,8 +130,8 @@ fetch a dimension: --- fetch a metric: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: 
@@ -134,8 +150,8 @@ fetch a metric: --- fetch a tag: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -151,13 +167,30 @@ fetch a tag: - match: {hits.hits.0.fields.k8s\.pod\.ip: ['10.10.55.2']} - is_false: hits.hits.0.fields._tsid # tsid isn't fetched by default -# TODO add test to fetch the tsid +--- +"fetch the tsid": + - skip: + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 + + - do: + search: + index: test + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- aggregate a dimension: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -178,8 +211,8 @@ aggregate a dimension: --- aggregate a metric: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -206,8 +239,8 @@ aggregate a metric: --- aggregate a tag: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: search: @@ -229,23 +262,62 @@ aggregate a tag: - match: {aggregations.ips.buckets.2.key: 10.10.55.3} - match: {aggregations.ips.buckets.2.doc_count: 4} +--- +"aggregate the tsid": + - skip: + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc -# TODO add a test aggregating the _tsid + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + +--- +"aggregate filter the tsid fails": + - skip: + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 + + - do: + catch: /\[_tsid\] is not searchable/ + search: + index: test + body: + size: 0 + aggs: + tsids: + filter: + term: + _tsid: won't work --- field capabilities: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: field_caps: index: test - fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, _tsid] + fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, metricset, _tsid] - # TODO assert time_series_metric and time_series_dimension - - match: {fields.k8s\.pod\.uid.keyword.searchable: true} - - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - match: {fields.k8s\.pod\.uid.keyword.searchable: true} + - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - match: {fields.k8s\.pod\.uid.keyword.time_series_dimension: true} - is_false: fields.k8s\.pod\.uid.keyword.indices - is_false: fields.k8s\.pod\.uid.keyword.non_searchable_indices - is_false: fields.k8s\.pod\.uid.keyword.non_aggregatable_indices @@ -259,4 +331,68 @@ field capabilities: - is_false: fields.k8s\.pod\.ip.ip.indices - is_false: fields.k8s\.pod\.ip.ip.non_searchable_indices - is_false: fields.k8s\.pod\.ip.ip.non_aggregatable_indices - # TODO assert tsid once we build it:
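 + # field_caps should report dimension fields with time_series_dimension: true + # and expose _tsid as a metadata field that is aggregatable but not searchable. + - match: {fields.metricset.keyword.searchable: true} + - match: 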
{fields.metricset.keyword.aggregatable: true} + - match: {fields.metricset.keyword.time_series_dimension: true} + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + - match: {fields._tsid._tsid.metadata_field: true} + - match: {fields._tsid._tsid.searchable: false} + - match: {fields._tsid._tsid.aggregatable: true} + - is_false: fields._tsid._tsid.indices + - is_false: fields._tsid._tsid.non_searchable_indices + - is_false: fields._tsid._tsid.non_aggregatable_indices + +--- +ids query: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + # Ingest documents assigning custom ids so we can query them + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "u1"}}' + - '{"@timestamp": "2021-04-28T18:45:04.467Z", "metricset": "foo", "k8s": {"pod": {"name": "cat", "uid":"6483d28c-24ee-44f2-926b-63b89d6d8b1b", "ip": "10.10.55.1", "network": {"tx": 2001828691, "rx": 802133794}}}}' + - '{"index": {"_index": "test", "_id": "u2"}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "foo", "k8s": {"pod": {"name": "cat", "uid":"6483d28c-24ee-44f2-926b-63b89d6d8b1b", "ip": "10.10.55.1", "network": {"tx": 2001838691, "rx": 801479970}}}}' + - '{"index": {"_index": "test", "_id": "u3"}}' + - '{"@timestamp": "2021-04-28T18:55:24.467Z", "metricset": "foo", "k8s": {"pod": {"name": "cat", "uid":"6483d28c-24ee-44f2-926b-63b89d6d8b1b", "ip": "10.10.55.1", "network": {"tx": 2001848691, "rx": 801479970}}}}' + + - do: + search: + index: test + body: + fields: + - field: k8s.pod.network.tx + query: + ids: + values: ["u1", "u3"] + sort: ["@timestamp"] + + - match: {hits.total.value: 2} + - match: {hits.hits.0._id: "u1"} + - match: {hits.hits.0.fields.k8s\.pod\.network\.tx: [2001828691]} + - match: {hits.hits.1._id: "u3"} + - match: {hits.hits.1.fields.k8s\.pod\.network\.tx: [2001848691]} + +--- +sort by tsid: + - skip: + version: " - 8.0.99" + reason: _tsid introduced in 8.1.0 + + - do: + search: + index: test + body: + sort: [ "_tsid", "@timestamp" ] + + - match: {hits.total.value: 8} + - match: {hits.hits.0.sort: [{ "k8s.pod.uid" : "947e4ced-1786-4e53-9e0c-5c447e959507", "metricset" : "pod"}, 1619635804467]} + - match: {hits.hits.1.sort: [{ "k8s.pod.uid" : "947e4ced-1786-4e53-9e0c-5c447e959507", "metricset" : "pod"}, 1619635824467]} + - match: {hits.hits.4.sort: [{ "k8s.pod.uid" : "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "metricset" : "pod"}, 1619635803142]} + - match: {hits.hits.7.sort: [{ "k8s.pod.uid" : "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "metricset" : "pod"}, 1619635863142]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml index 5a187ce0b6430..e87338bf7327a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -11,6 +11,9 @@ setup: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -57,13 +60,40 @@ setup: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": 
"dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' -# TODO search on _tsid in an alias +--- +search an alias: + - skip: + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 + + - do: + indices.put_alias: + index: test + name: test_alias + + - do: + search: + index: test_alias + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} --- index into alias: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.put_alias: @@ -85,4 +115,23 @@ index into alias: - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434595272, "rx": 530605511}}}}' - match: {errors: false} - # TODO search on tsid once we generate it + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 12} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - match: {aggregations.tsids.buckets.2.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.2.doc_count: 4} + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml index ca4aa52e15a13..5e16cabaac9fd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml @@ -1,8 +1,8 @@ --- add dimensions with put_mapping: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -12,6 +12,9 @@ add dimensions with put_mapping: index: mode: time_series routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -41,18 +44,18 @@ add dimensions with put_mapping: index: test body: fields: - # TODO fetch the tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO Fetch the tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to no dims with dynamic_template over index: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -62,6 +65,9 @@ add dimensions to no dims with dynamic_template over index: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 
2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: dynamic_templates: - keywords: @@ -87,17 +93,17 @@ add dimensions to no dims with dynamic_template over index: index: test body: fields: - # TODO fetch the tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch the tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to no dims with dynamic_template over bulk: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -107,6 +113,9 @@ add dimensions to no dims with dynamic_template over bulk: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: dynamic_templates: - keywords: @@ -132,17 +141,17 @@ add dimensions to no dims with dynamic_template over bulk: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to some dims with dynamic_template over index: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -152,6 +161,9 @@ add dimensions to some dims with dynamic_template over index: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: dynamic_templates: - keywords: @@ -181,17 +193,17 @@ add dimensions to some dims with dynamic_template over index: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat, other_dim: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to some dims with dynamic_template over bulk: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -201,6 +213,9 @@ add dimensions to some dims with dynamic_template over bulk: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: dynamic_templates: - keywords: @@ -229,8 +244,8 @@ add dimensions to some dims with dynamic_template over bulk: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat, other_dim: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml index 06eb087567238..8a2ec5497d867 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml @@ -1,8 +1,8 @@ keyword dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 
8.1.0 - do: indices.create: @@ -12,6 +12,9 @@ keyword dimension: index: mode: time_series routing_path: [uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -43,14 +46,36 @@ keyword dimension: - '{"@timestamp": "2021-04-28T18:35:54.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.3}' - is_false: errors - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {uid: 947e4ced-1786-4e53-9e0c-5c447e959507}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} --- long dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -60,6 +85,9 @@ long dimension: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -93,14 +121,36 @@ long dimension: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "id": 2, "voltage": 3.3}' - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {id: 1, metricset: aa}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {id: 2, metricset: aa }} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} --- ip dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -110,6 +160,9 @@ ip dimension: index: mode: time_series routing_path: [metricset] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -143,4 +196,26 @@ ip dimension: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "ip": "2001:0db8:85a3::8a2e:0370:7334", "voltage": 3.3}' - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: { ip: "10.10.1.1", metricset: aa}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: { ip: "2001:db8:85a3::8a2e:370:7334", metricset: aa }} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} diff --git 
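The three dimension-type tests above all aggregate on _tsid, and as their bucket keys show, a document's _tsid is effectively the map of its dimension fields to their values, keyed by field name. For example, a document like the one the long-dimension test indexes,

  {"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "id": 2, "voltage": 3.3}

groups under the bucket key

  {id: 2, metricset: aa}

while the metric value (voltage) only feeds the avg sub-aggregation, never the key.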
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml
index 8dd33551912a4..ed7e859a851ee 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml
@@ -1,7 +1,7 @@
 setup:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
       features: "arbitrary_key"
 
   # Force allocating all shards to a single node so that we can shrink later.
@@ -23,6 +23,9 @@ setup:
             index:
               mode: time_series
              routing_path: [metricset, k8s.pod.uid]
+              time_series:
+                start_time: 2021-04-28T00:00:00Z
+                end_time: 2021-04-29T00:00:00Z
               number_of_shards: 3
               number_of_replicas: 0
               # ensure everything is allocated on the same data node for shrink
@@ -82,11 +85,11 @@ setup:
 ---
 split:
   - skip:
-      version: all
-      reason: shard splitting doesn't work yet
-      features: "arbitrary_key"
+      version: " - 8.0.99"
+      reason: index-split check introduced in 8.1.0
 
   - do:
+      catch: /index-split is not supported because the destination index \[test\] is in time series mode/
       indices.split:
         index: test
         target: test_split
@@ -95,24 +98,11 @@ split:
             index.number_of_replicas: 0
             index.number_of_shards: 6
 
-  - do:
-      search:
-        index: test_split
-        body:
-          fields:
-            # TODO fetch tsid
-          query:
-            query_string:
-              query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat'
-
-  - match: {hits.total.value: 1}
-  # TODO test fetching tsid
-
 ---
 shrink:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: _tsid support introduced in 8.1.0
 
   - do:
       indices.shrink:
@@ -126,19 +116,20 @@ shrink:
       search:
        index: test_shrink
        body:
-          # TODO test fetching tsid
+          fields:
+            - field: _tsid
           query:
             query_string:
               query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat'
 
   - match: {hits.total.value: 1}
-  # TODO test fetching tsid
+  - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]}
 
 ---
 clone:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: _tsid support introduced in 8.1.0
 
   - do:
       indices.clone:
@@ -149,10 +140,102 @@ clone:
       search:
        index: test_clone
        body:
-          # TODO test fetching tsid
+          fields:
+            - field: _tsid
           query:
             query_string:
               query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat'
 
   - match: {hits.total.value: 1}
-  # TODO test fetching tsid
+  - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]}
+
+---
+clone no source index:
+  - skip:
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
+
+  - do:
+      indices.create:
+        index: test_no_source
+        body:
+          settings:
+            index:
+              mode: time_series
+              routing_path: [ metricset, k8s.pod.uid ]
+              time_series:
+                start_time: 2021-04-28T00:00:00Z
+                end_time: 2021-04-29T00:00:00Z
+              number_of_shards: 1
+              number_of_replicas: 0
+          mappings:
+            _source:
+              enabled: false
+            properties:
+              "@timestamp":
+                type: date
+              metricset:
+                type: keyword
+                time_series_dimension: true
+              k8s:
+                properties:
+                  pod:
+                    properties:
+                      uid:
+                        type: keyword
+                        time_series_dimension: true
+                      name:
+                        type: keyword
+                      ip:
+                        type: ip
+                      network:
+                        properties:
+                          tx:
+                            type: long
+                          rx:
+                            type: long
+
+  - do:
+      bulk:
+        refresh: true
+        index: test_no_source
+        body:
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}'
+
+  - do:
+      indices.put_settings:
+        index: test_no_source
+        body:
+          index.blocks.write: true
+
+  - do:
+      indices.clone:
+        index: test_no_source
+        target: test_no_source_clone
+
+  - do:
+      search:
+        index: test_no_source_clone
+        body:
+          docvalue_fields:
+            - field: _tsid
+          query:
+            query_string:
+              query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat'
+
+  - match: {hits.total.value: 1}
+  - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml
index a5b7846399d71..a742d0a8148a8 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml
@@ -1,7 +1,7 @@
 setup:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       indices.create:
@@ -11,6 +11,9 @@ setup:
             index:
               mode: time_series
              routing_path: [metricset, k8s.pod.uid]
+              time_series:
+                start_time: 2021-04-28T00:00:00Z
+                end_time: 2021-04-29T00:00:00Z
               number_of_replicas: 0
               number_of_shards: 2
           mappings:
@@ -63,8 +66,8 @@ setup:
 ---
 index with specified routing:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       catch: /indexing with a specified routing is not supported because the destination index \[test\] is in time series mode/
@@ -87,8 +90,8 @@ index with specified routing:
 ---
 index with specified routing over _bulk:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       bulk:
        refresh: true
@@ -101,8 +104,8 @@ index with specified routing over _bulk:
 ---
 delete:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       catch: /delete is not supported because the destination index \[test\] is in time series mode/
@@ -113,8 +116,8 @@ delete:
 ---
 delete over _bulk:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       bulk:
@@ -127,8 +130,8 @@ delete over _bulk:
 ---
 noop update:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       search:
@@ -149,8 +152,8 @@ noop update:
 ---
 update:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   # We fail even though the document isn't found.
   - do:
@@ -174,8 +177,8 @@ update:
 ---
 update over _bulk:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       bulk:
@@ -188,8 +191,8 @@ update over _bulk:
 ---
 search with routing:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   # We fail even though the document isn't found.
   - do:
@@ -201,8 +204,8 @@ search with routing:
 ---
 alias with routing:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       catch: /routing is forbidden on CRUD operations that target indices in \[index.mode=time_series\]/
@@ -215,8 +218,8 @@ alias with routing:
 ---
 alias with search_routing:
   - skip:
-      version: " - 7.99.99"
-      reason: introduced in 8.0.0
+      version: " - 8.0.99"
+      reason: introduced in 8.1.0
 
   - do:
       catch: /routing is forbidden on CRUD operations that target indices in \[index.mode=time_series\]/
diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml
new file mode 100644
index 0000000000000..98ff9065dd472
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml
@@ -0,0 +1,30 @@
+---
+setup:
+  - skip:
+      version: "9.0.0 - "
+      reason: "compatible from 8.x to 7.x"
+      features:
+        - "headers"
+
+---
+moving_avg agg throws exception:
+  - do:
+      catch: "/Moving Average aggregation usage is not supported. Use the \\[moving_fn\\] aggregation instead./"
+      search:
+        rest_total_hits_as_int: true
+        body:
+          aggs:
+            the_histo:
+              date_histogram:
+                field: "date"
+                calendar_interval: "1d"
+              aggs:
+                the_avg:
+                  avg:
+                    field: "value_field"
+                the_movavg:
+                  moving_avg:
+                    buckets_path: "the_avg"
+      headers:
+        Content-Type: "application/vnd.elasticsearch+json;compatible-with=7"
+        Accept: "application/vnd.elasticsearch+json;compatible-with=7"
diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/sort/10_nested_path_filter.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml
similarity index 100%
rename from rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/sort/10_nested_path_filter.yml
rename to rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml
diff --git a/server/build.gradle b/server/build.gradle
index 0f9798c53f22d..6cfe6badaf5cc 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -9,7 +9,6 @@ import org.elasticsearch.gradle.internal.info.BuildParams
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'nebula.optional-base'
 apply plugin: 'elasticsearch.publish'
 apply plugin: 'elasticsearch.internal-cluster-test'
 
@@ -59,7 +58,7 @@ dependencies {
 
   // logging
   api "org.apache.logging.log4j:log4j-api:${versions.log4j}"
-  api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
+  api "org.apache.logging.log4j:log4j-core:${versions.log4j}"
 
   api "net.java.dev.jna:jna:${versions.jna}"
 
@@ -132,6 +131,9 @@ if (BuildParams.isSnapshotBuild() == false) {
   tasks.named("test").configure {
     systemProperty 'es.index_mode_feature_flag_registered', 'true'
   }
+  tasks.named("internalClusterTest").configure {
+    systemProperty 'es.index_mode_feature_flag_registered', 'true'
+  }
 }
 
 tasks.named("thirdPartyAudit").configure {
@@ -140,7 +142,6 @@ tasks.named("thirdPartyAudit").configure {
     'com.fasterxml.jackson.databind.ObjectMapper',
 
     // from log4j
-    'com.conversantmedia.util.concurrent.DisruptorBlockingQueue',
     'com.conversantmedia.util.concurrent.SpinPolicy',
     'com.fasterxml.jackson.databind.SerializationFeature',
     'com.fasterxml.jackson.annotation.JsonInclude$Include',
@@ -164,8 +165,6 @@ tasks.named("thirdPartyAudit").configure {
     'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
     'org.fusesource.jansi.Ansi',
     'org.fusesource.jansi.AnsiRenderer$Code',
-    'com.lmax.disruptor.BlockingWaitStrategy',
-    'com.lmax.disruptor.BusySpinWaitStrategy',
     'com.lmax.disruptor.EventFactory',
     'com.lmax.disruptor.EventTranslator',
     'com.lmax.disruptor.EventTranslatorTwoArg',
@@ -175,10 +174,7 @@ tasks.named("thirdPartyAudit").configure {
     'com.lmax.disruptor.RingBuffer',
     'com.lmax.disruptor.Sequence',
     'com.lmax.disruptor.SequenceReportingEventHandler',
-    'com.lmax.disruptor.SleepingWaitStrategy',
-    'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
     'com.lmax.disruptor.WaitStrategy',
-    'com.lmax.disruptor.YieldingWaitStrategy',
     'com.lmax.disruptor.dsl.Disruptor',
     'com.lmax.disruptor.dsl.ProducerType',
     'javax.jms.Connection',
@@ -197,25 +193,17 @@ tasks.named("thirdPartyAudit").configure {
     'javax.mail.Transport',
     'javax.mail.internet.InternetAddress',
     'javax.mail.internet.InternetHeaders',
-    'javax.mail.internet.MimeBodyPart',
     'javax.mail.internet.MimeMessage',
     'javax.mail.internet.MimeMultipart',
     'javax.mail.internet.MimeUtility',
-    'javax.mail.util.ByteArrayDataSource',
     'org.apache.commons.compress.compressors.CompressorStreamFactory',
     'org.apache.commons.compress.utils.IOUtils',
     'org.apache.commons.csv.CSVFormat',
     'org.apache.commons.csv.QuoteMode',
-    'org.apache.kafka.clients.producer.Callback',
-    'org.apache.kafka.clients.producer.KafkaProducer',
     'org.apache.kafka.clients.producer.Producer',
-    'org.apache.kafka.clients.producer.ProducerRecord',
     'org.apache.kafka.clients.producer.RecordMetadata',
     'org.codehaus.stax2.XMLStreamWriter2',
-    'org.jctools.queues.MessagePassingQueue$Consumer',
     'org.jctools.queues.MpscArrayQueue',
-    'org.osgi.framework.AdaptPermission',
-    'org.osgi.framework.AdminPermission',
     'org.osgi.framework.Bundle',
     'org.osgi.framework.BundleActivator',
     'org.osgi.framework.BundleContext',
@@ -253,20 +241,7 @@ tasks.named("licenseHeaders").configure {
 
 tasks.named('splitPackagesAudit').configure {
   // Lucene packages should be owned by Lucene!
-  ignoreClasses 'org.apache.lucene.queries.BinaryDocValuesRangeQuery',
-    'org.apache.lucene.queries.BlendedTermQuery',
-    'org.apache.lucene.queries.SpanMatchNoDocsQuery',
-    'org.apache.lucene.search.vectorhighlight.CustomFieldQuery',
-    // These are tricky because Lucene itself splits the index package,
     // but this should be fixed in Lucene 9
-    'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper',
-
-    // cli is owned by the libs/cli, so these should be moved to o.e.server.cli
-    'org.elasticsearch.cli.CommandLoggingConfigurator',
-    'org.elasticsearch.cli.EnvironmentAwareCommand',
-    'org.elasticsearch.cli.KeyStoreAwareCommand',
-    'org.elasticsearch.cli.LoggingAwareCommand',
-    'org.elasticsearch.cli.LoggingAwareMultiCommand'
-
+  ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper'
 }
diff --git a/server/licenses/log4j-api-2.11.1.jar.sha1 b/server/licenses/log4j-api-2.11.1.jar.sha1
deleted file mode 100644
index 4b1bfffac179f..0000000000000
--- a/server/licenses/log4j-api-2.11.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-268f0fe4df3eefe052b57c87ec48517d64fb2a10
\ No newline at end of file
diff --git a/server/licenses/log4j-api-2.17.1.jar.sha1 b/server/licenses/log4j-api-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..9d0e5dc631ed5
--- /dev/null
+++ b/server/licenses/log4j-api-2.17.1.jar.sha1
@@ -0,0 +1 @@
+d771af8e336e372fb5399c99edabe0919aeaf5b2
\ No newline at end of file
diff --git a/server/licenses/log4j-core-2.11.1.jar.sha1 b/server/licenses/log4j-core-2.11.1.jar.sha1
deleted file mode 100644
index 2fb8589380a03..0000000000000
--- a/server/licenses/log4j-core-2.11.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-592a48674c926b01a9a747c7831bcd82a9e6d6e4
\ No newline at end of file
diff --git a/server/licenses/log4j-core-2.17.1.jar.sha1 b/server/licenses/log4j-core-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7d4634f3d4e18
--- /dev/null
+++ b/server/licenses/log4j-core-2.17.1.jar.sha1
@@ -0,0 +1 @@
+779f60f3844dadc3ef597976fcb1e5127b1f343d
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-analysis-common-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 1b1aab0f1af55..0000000000000
--- a/server/licenses/lucene-analysis-common-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8e382003c9ebbcab79085f752687647281c21ad4
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.0.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..2ed9dbcbe22f6
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.0.0.jar.sha1
@@ -0,0 +1 @@
+f78890829c3d6f15de48fdbc2c77ef4c0e3f005c
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-backward-codecs-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index f55f27003e768..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3eb5495498aa61296506966ad2b7121b4a1c62d5
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.0.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..acf5a2b543199
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.0.0.jar.sha1
@@ -0,0 +1 @@
+9fb48d0244799e18299449ee62459caab0728490
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-core-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 8994d9a1a36b9..0000000000000
--- a/server/licenses/lucene-core-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dc1314d79680a9afce56423303100b0489bc209d
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.0.0.jar.sha1 b/server/licenses/lucene-core-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..c874382fc8355
--- /dev/null
+++ b/server/licenses/lucene-core-9.0.0.jar.sha1
@@ -0,0 +1 @@
+be679fd274f264e4e8b02bc032d2788cd4076ab4
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-grouping-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 2a1e8094430cb..0000000000000
--- a/server/licenses/lucene-grouping-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9da65e2f7f61c3dac87e0e84befdd5db23a1ef7d
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.0.0.jar.sha1 b/server/licenses/lucene-grouping-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..18a81b5fa97ff
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.0.0.jar.sha1
@@ -0,0 +1 @@
+27ebe235d427b4e392fabab9b6bfa09524ca7f8b
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-highlighter-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 73e5c855b45b0..0000000000000
--- a/server/licenses/lucene-highlighter-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4117102e9549359f1facc738effeaac1ce51f8c
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.0.0.jar.sha1 b/server/licenses/lucene-highlighter-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..5503495c2f86c
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.0.0.jar.sha1
@@ -0,0 +1 @@
+a3cb395c2e8c672e6eec951b2b02371a4a883f73
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-join-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 27551bb30d06c..0000000000000
--- a/server/licenses/lucene-join-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b0d5fa88ac04dc155031b8c6433ca2cd485866d1
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.0.0.jar.sha1 b/server/licenses/lucene-join-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..dcbaa17875435
--- /dev/null
+++ b/server/licenses/lucene-join-9.0.0.jar.sha1
@@ -0,0 +1 @@
+94a855b5d09a6601289aeaeba0f11d5539552590
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-memory-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 0e8913d2e127d..0000000000000
--- a/server/licenses/lucene-memory-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fa341663fe6532923891365dfb1ffe80cfb8a58f
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.0.0.jar.sha1 b/server/licenses/lucene-memory-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..157597ce9878f
--- /dev/null
+++ b/server/licenses/lucene-memory-9.0.0.jar.sha1
@@ -0,0 +1 @@
+2371c95031422bc1f501d43ffcc7311baed4b35b
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-misc-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 2eac5c466d8fc..0000000000000
--- a/server/licenses/lucene-misc-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2cd0fc005d2a1a6c73fb2f2ea88940738e4debda
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.0.0.jar.sha1 b/server/licenses/lucene-misc-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..ef031d34305a2
--- /dev/null
+++ b/server/licenses/lucene-misc-9.0.0.jar.sha1
@@ -0,0 +1 @@
+25c6170f4fa2f707908dfb92fbafc76727f901e0
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-queries-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 46c681ef046ac..0000000000000
--- a/server/licenses/lucene-queries-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-240ad53af94b4887c88a3d824038672a46443807
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.0.0.jar.sha1 b/server/licenses/lucene-queries-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..4b43c9e6b709a
--- /dev/null
+++ b/server/licenses/lucene-queries-9.0.0.jar.sha1
@@ -0,0 +1 @@
+87b4c7833d30895baf7091f9cb0db878e970b604
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-queryparser-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 34741bb69438b..0000000000000
--- a/server/licenses/lucene-queryparser-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f1250f6369651d0740e49c4fab50586c747e34a0
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.0.0.jar.sha1 b/server/licenses/lucene-queryparser-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..62a4650a168c7
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.0.0.jar.sha1
@@ -0,0 +1 @@
+bf13395ad2033bca3182fcbc83204e8ae1951945
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-sandbox-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index f54aba8ebc093..0000000000000
--- a/server/licenses/lucene-sandbox-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fc09401eea027be3f687c81e5b43a85790b5aa6f
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.0.0.jar.sha1 b/server/licenses/lucene-sandbox-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..4396efda1a83b
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.0.0.jar.sha1
@@ -0,0 +1 @@
+3c153a1dc1da3f98083cc932c9476df4b77b0ca5
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-spatial3d-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index e2c842c9547bf..0000000000000
--- a/server/licenses/lucene-spatial3d-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-51fc08491edd09a1f5ee0ac9a34e029de71b38fd
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.0.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..0722795c260ad
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.0.0.jar.sha1
@@ -0,0 +1 @@
+6b4ee47f218ed3d123c1b07671677a2e4f3c133b
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.0.0-snapshot-cc2a31f2be8.jar.sha1 b/server/licenses/lucene-suggest-9.0.0-snapshot-cc2a31f2be8.jar.sha1
deleted file mode 100644
index 6ed227e2b4a74..0000000000000
--- a/server/licenses/lucene-suggest-9.0.0-snapshot-cc2a31f2be8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b7f41a12bad4788c3a54c125e9137fed3a4f07e8
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.0.0.jar.sha1 b/server/licenses/lucene-suggest-9.0.0.jar.sha1
new file mode 100644
index 0000000000000..7eb41e758379e
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.0.0.jar.sha1
@@ -0,0 +1 @@
+a7d0e7279737114c039f5214082da948732096a6
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
index 2454599e58585..2efd818685b75 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
@@ -66,7 +66,7 @@
 import org.elasticsearch.action.update.UpdateAction;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
-import org.elasticsearch.client.Requests;
+import org.elasticsearch.client.internal.Requests;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -278,31 +278,31 @@ public void testBulk() {
         String[] bulkShardActions = new String[] { BulkAction.NAME + "[s][p]", BulkAction.NAME + "[s][r]" };
         interceptTransportActions(bulkShardActions);
 
-        List indices = new ArrayList<>();
+        List indicesOrAliases = new ArrayList<>();
         BulkRequest bulkRequest = new BulkRequest();
         int numIndexRequests = iterations(1, 10);
         for (int i = 0; i < numIndexRequests; i++) {
             String indexOrAlias = randomIndexOrAlias();
             bulkRequest.add(new IndexRequest(indexOrAlias).id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"));
-            indices.add(indexOrAlias);
+            indicesOrAliases.add(indexOrAlias);
         }
         int numDeleteRequests = iterations(1, 10);
         for (int i = 0; i < numDeleteRequests; i++) {
             String indexOrAlias = randomIndexOrAlias();
             bulkRequest.add(new DeleteRequest(indexOrAlias).id("id"));
-            indices.add(indexOrAlias);
+            indicesOrAliases.add(indexOrAlias);
         }
         int numUpdateRequests = iterations(1, 10);
         for (int i = 0; i < numUpdateRequests; i++) {
             String indexOrAlias = randomIndexOrAlias();
             bulkRequest.add(new UpdateRequest(indexOrAlias, "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1"));
-            indices.add(indexOrAlias);
+            indicesOrAliases.add(indexOrAlias);
         }
 
         internalCluster().coordOnlyNodeClient().bulk(bulkRequest).actionGet();
 
         clearInterceptedActions();
assertIndicesSubset(indices, bulkShardActions); + assertIndicesSubset(indicesOrAliases, bulkShardActions); } public void testGet() { @@ -342,36 +342,36 @@ public void testMultiTermVector() { String multiTermVectorsShardAction = MultiTermVectorsAction.NAME + "[shard][s]"; interceptTransportActions(multiTermVectorsShardAction); - List indices = new ArrayList<>(); + List indicesOrAliases = new ArrayList<>(); MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest(); int numDocs = iterations(1, 30); for (int i = 0; i < numDocs; i++) { String indexOrAlias = randomIndexOrAlias(); multiTermVectorsRequest.add(indexOrAlias, Integer.toString(i)); - indices.add(indexOrAlias); + indicesOrAliases.add(indexOrAlias); } internalCluster().coordOnlyNodeClient().multiTermVectors(multiTermVectorsRequest).actionGet(); clearInterceptedActions(); - assertIndicesSubset(indices, multiTermVectorsShardAction); + assertIndicesSubset(indicesOrAliases, multiTermVectorsShardAction); } public void testMultiGet() { String multiGetShardAction = MultiGetAction.NAME + "[shard][s]"; interceptTransportActions(multiGetShardAction); - List indices = new ArrayList<>(); + List indicesOrAliases = new ArrayList<>(); MultiGetRequest multiGetRequest = new MultiGetRequest(); int numDocs = iterations(1, 30); for (int i = 0; i < numDocs; i++) { String indexOrAlias = randomIndexOrAlias(); multiGetRequest.add(indexOrAlias, Integer.toString(i)); - indices.add(indexOrAlias); + indicesOrAliases.add(indexOrAlias); } internalCluster().coordOnlyNodeClient().multiGet(multiGetRequest).actionGet(); clearInterceptedActions(); - assertIndicesSubset(indices, multiGetShardAction); + assertIndicesSubset(indicesOrAliases, multiGetShardAction); } public void testFlush() { @@ -385,9 +385,9 @@ public void testFlush() { internalCluster().coordOnlyNodeClient().admin().indices().flush(flushRequest).actionGet(); clearInterceptedActions(); - String[] indices = TestIndexNameExpressionResolver.newInstance() + String[] concreteIndexNames = TestIndexNameExpressionResolver.newInstance() .concreteIndexNames(client().admin().cluster().prepareState().get().getState(), flushRequest); - assertIndicesSubset(Arrays.asList(indices), indexShardActions); + assertIndicesSubset(Arrays.asList(concreteIndexNames), indexShardActions); } public void testForceMerge() { @@ -412,9 +412,9 @@ public void testRefresh() { internalCluster().coordOnlyNodeClient().admin().indices().refresh(refreshRequest).actionGet(); clearInterceptedActions(); - String[] indices = TestIndexNameExpressionResolver.newInstance() + String[] concreteIndexNames = TestIndexNameExpressionResolver.newInstance() .concreteIndexNames(client().admin().cluster().prepareState().get().getState(), refreshRequest); - assertIndicesSubset(Arrays.asList(indices), indexShardActions); + assertIndicesSubset(Arrays.asList(concreteIndexNames), indexShardActions); } public void testClearCache() { @@ -650,13 +650,7 @@ private static void assertIndicesSubset(List indices, boolean optional, } static IndicesRequest convertRequest(TransportRequest request) { - final IndicesRequest indicesRequest; - if (request instanceof IndicesRequest) { - indicesRequest = (IndicesRequest) request; - } else { - indicesRequest = TransportReplicationActionTests.resolveRequest(request); - } - return indicesRequest; + return request instanceof IndicesRequest indicesRequest ? 
indicesRequest : TransportReplicationActionTests.resolveRequest(request); } private String randomIndexOrAlias() { @@ -670,21 +664,21 @@ private String randomIndexOrAlias() { private String[] randomIndicesOrAliases() { int count = randomIntBetween(1, indices.size() * 2); // every index has an alias - String[] indices = new String[count]; + String[] randomNames = new String[count]; for (int i = 0; i < count; i++) { - indices[i] = randomIndexOrAlias(); + randomNames[i] = randomIndexOrAlias(); } - return indices; + return randomNames; } private String[] randomUniqueIndicesOrAliases() { String[] uniqueIndices = randomUniqueIndices(); - String[] indices = new String[uniqueIndices.length]; + String[] randomNames = new String[uniqueIndices.length]; int i = 0; for (String index : uniqueIndices) { - indices[i++] = randomBoolean() ? index + "-alias" : index; + randomNames[i++] = randomBoolean() ? index + "-alias" : index; } - return indices; + return randomNames; } private String[] randomUniqueIndices() { @@ -771,8 +765,8 @@ synchronized List consumeRequests(String action) { return requests.remove(action); } - synchronized void interceptTransportActions(String... actions) { - Collections.addAll(this.actions, actions); + synchronized void interceptTransportActions(String... transportActions) { + Collections.addAll(this.actions, transportActions); } synchronized void clearInterceptedActions() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java index d9a50cb8aab9f..617b78e4f1bb8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.test.ESIntegTestCase; import java.util.concurrent.CountDownLatch; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java index dec17b5f92564..178175b8b5554 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java @@ -71,8 +71,7 @@ public void onFailure(Exception e) { // validate all responses for (Object response : responses) { - if (response instanceof SearchResponse) { - SearchResponse searchResponse = (SearchResponse) response; + if (response instanceof SearchResponse searchResponse) { for (ShardSearchFailure failure : searchResponse.getShardFailures()) { assertThat( failure.reason().toLowerCase(Locale.ENGLISH), @@ -82,8 +81,7 @@ public void onFailure(Exception e) { } else { Exception t = (Exception) response; Throwable unwrap = ExceptionsHelper.unwrapCause(t); - if (unwrap instanceof SearchPhaseExecutionException) { - SearchPhaseExecutionException e = (SearchPhaseExecutionException) unwrap; + if (unwrap instanceof SearchPhaseExecutionException e) { for (ShardSearchFailure failure : e.shardFailures()) { assertThat( failure.reason().toLowerCase(Locale.ENGLISH), diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 8191732579392..f539db0c303bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -55,20 +55,12 @@ public void testHotThreadsDontFail() throws ExecutionException, InterruptedExcep } nodesHotThreadsRequestBuilder.setIgnoreIdleThreads(randomBoolean()); if (randomBoolean()) { - switch (randomIntBetween(0, 3)) { - case 3: - type = "mem"; - break; - case 2: - type = "cpu"; - break; - case 1: - type = "wait"; - break; - default: - type = "block"; - break; - } + type = switch (randomIntBetween(0, 3)) { + case 3 -> "mem"; + case 2 -> "cpu"; + case 1 -> "wait"; + default -> "block"; + }; assertThat(type, notNullValue()); nodesHotThreadsRequestBuilder.setType(HotThreads.ReportType.of(type)); } else { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 8d231de5715d9..ce22a30ac1dc2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -373,13 +373,10 @@ public void testUnassignedReplicaWithPriorCopy() throws Exception { assertThat(d.getExplanation(), startsWith("a copy of this shard is already allocated to this node [")); } else if (d.label().equals("filter") && nodeHoldingPrimary == false) { assertEquals(Decision.Type.NO, d.type()); - assertEquals( - "node does not match index setting [index.routing.allocation.include] " - + "filters [_name:\"" - + primaryNodeName - + "\"]", - d.getExplanation() - ); + assertEquals(""" + node does not match index setting [index.routing.allocation.include] \ + filters [_name:"%s"]\ + """.formatted(primaryNodeName), d.getExplanation()); } else { assertEquals(Decision.Type.YES, d.type()); assertNotNull(d.getExplanation()); @@ -948,10 +945,9 @@ public void testBetterBalanceButCannotAllocate() throws Exception { for (Decision d : result.getCanAllocateDecision().getDecisions()) { if (d.label().equals("filter")) { assertEquals(Decision.Type.NO, d.type()); - assertEquals( - "node does not match index setting [index.routing.allocation.include] filters [_name:\"" + primaryNodeName + "\"]", - d.getExplanation() - ); + assertEquals(""" + node does not match index setting [index.routing.allocation.include] filters [_name:"%s"]\ + """.formatted(primaryNodeName), d.getExplanation()); } else { assertEquals(Decision.Type.YES, d.type()); assertNotNull(d.getExplanation()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 1c646fb9beebd..ad50fa21f114a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.support.GroupedActionListener; import 
org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; @@ -167,7 +167,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(rootTaskId) + .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); if (randomBoolean()) { @@ -179,7 +179,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { .get() .getTasks(); for (TaskInfo subTask : randomSubsetOf(runningTasks)) { - client().admin().cluster().prepareCancelTasks().setTaskId(subTask.getTaskId()).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(subTask.taskId()).waitForCompletion(false).get(); } } try { @@ -217,13 +217,13 @@ public void testCancelTaskMultipleTimes() throws Exception { ActionFuture mainTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); allowPartialRequest(rootRequest); - CancelTasksResponse resp = client().admin().cluster().prepareCancelTasks().setTaskId(taskId).waitForCompletion(false).get(); + CancelTasksResponse resp = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); assertThat(resp.getTaskFailures(), empty()); assertThat(resp.getNodeFailures(), empty()); ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(true) .execute(); assertFalse(cancelFuture.isDone()); @@ -234,7 +234,7 @@ public void testCancelTaskMultipleTimes() throws Exception { CancelTasksResponse cancelError = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(randomBoolean()) .get(); assertThat(cancelError.getNodeFailures(), hasSize(1)); @@ -255,7 +255,7 @@ public void testDoNotWaitForCompletion() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(waitForCompletion) .execute(); if (waitForCompletion) { @@ -274,7 +274,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { TestRequest rootRequest = generateTestRequest(nodes, 0, between(1, 3)); ActionFuture rootTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); - client().admin().cluster().prepareCancelTasks().setTaskId(taskId).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); DiscoveryNode nodeWithParentTask = nodes.stream().filter(n -> n.getId().equals(taskId.getNodeId())).findFirst().get(); TransportTestAction mainAction = internalCluster().getInstance(TransportTestAction.class, nodeWithParentTask.getName()); PlainActionFuture future = new PlainActionFuture<>(); @@ -323,7 +323,7 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(rootTaskId) + .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); try { @@ -385,10 
+385,10 @@ static TaskId getRootTaskId(TestRequest request) throws Exception { .get(); List tasks = listTasksResponse.getTasks() .stream() - .filter(t -> t.getDescription().equals(request.taskDescription())) + .filter(t -> t.description().equals(request.taskDescription())) .collect(Collectors.toList()); assertThat(tasks, hasSize(1)); - taskId.set(tasks.get(0).getTaskId()); + taskId.set(tasks.get(0).taskId()); }); return taskId.get(); } @@ -460,8 +460,8 @@ String taskDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, taskDescription(), parentTaskId, headers); + public Task createTask(long someId, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(someId, type, action, taskDescription(), parentTaskId, headers); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java index a4140b0c10bd1..786c3adfccbc2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.support.ListenableActionFuture; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.Task; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index d94912e5e2ab6..f1be6ead32059 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -147,7 +147,7 @@ public void testMasterNodeOperationTasks() { List tasks = findEvents(ClusterHealthAction.NAME, Tuple::v1); // Verify that one of these tasks is a parent of another task - if (tasks.get(0).getParentTaskId().isSet()) { + if (tasks.get(0).parentTaskId().isSet()) { assertParentTask(Collections.singletonList(tasks.get(0)), tasks.get(1)); } else { assertParentTask(Collections.singletonList(tasks.get(1)), tasks.get(0)); @@ -217,7 +217,7 @@ public void testTransportBroadcastReplicationTasks() { logger.debug("number of shards, total: [{}], primaries: [{}] ", numberOfShards.totalNumShards, numberOfShards.numPrimaries); logger.debug("main events {}", numberOfEvents(RefreshAction.NAME, Tuple::v1)); - logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).getTaskId().getNodeId()); + logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).taskId().getNodeId()); logger.debug("[s] events {}", numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1)); logger.debug("[s][*] events {}", numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1)); logger.debug("nodes with the index {}", internalCluster().nodesInclude("test")); @@ -237,18 +237,18 @@ public void testTransportBroadcastReplicationTasks() { TaskInfo 
mainTask = findEvents(RefreshAction.NAME, Tuple::v1).get(0); List sTasks = findEvents(RefreshAction.NAME + "[s]", Tuple::v1); for (TaskInfo taskInfo : sTasks) { - if (mainTask.getTaskId().getNodeId().equals(taskInfo.getTaskId().getNodeId())) { + if (mainTask.taskId().getNodeId().equals(taskInfo.taskId().getNodeId())) { // This shard level task runs on the same node as a parent task - it should have the main task as a direct parent assertParentTask(Collections.singletonList(taskInfo), mainTask); } else { - String description = taskInfo.getDescription(); + String description = taskInfo.description(); // This shard level task runs on another node - it should have a corresponding shard level task on the node where main task // is running List sTasksOnRequestingNode = findEvents( RefreshAction.NAME + "[s]", event -> event.v1() - && mainTask.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) - && description.equals(event.v2().getDescription()) + && mainTask.taskId().getNodeId().equals(event.v2().taskId().getNodeId()) + && description.equals(event.v2().description()) ); // There should be only one parent task assertEquals(1, sTasksOnRequestingNode.size()); @@ -263,21 +263,21 @@ public void testTransportBroadcastReplicationTasks() { List spEvents = findEvents(RefreshAction.NAME + "[s][*]", Tuple::v1); for (TaskInfo taskInfo : spEvents) { List sTask; - if (taskInfo.getAction().endsWith("[s][p]")) { + if (taskInfo.action().endsWith("[s][p]")) { // A [s][p] level task should have a corresponding [s] level task on the same node sTask = findEvents( RefreshAction.NAME + "[s]", event -> event.v1() - && taskInfo.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) - && taskInfo.getDescription().equals(event.v2().getDescription()) + && taskInfo.taskId().getNodeId().equals(event.v2().taskId().getNodeId()) + && taskInfo.description().equals(event.v2().description()) ); } else { // A [s][r] level task should have a corresponding [s] level task on the a different node (where primary is located) sTask = findEvents( RefreshAction.NAME + "[s]", event -> event.v1() - && taskInfo.getParentTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) - && taskInfo.getDescription().equals(event.v2().getDescription()) + && taskInfo.parentTaskId().getNodeId().equals(event.v2().taskId().getNodeId()) + && taskInfo.description().equals(event.v2().description()) ); } // There should be only one parent task @@ -300,7 +300,7 @@ public void testTransportBulkTasks() { // the bulk operation should produce one main task List topTask = findEvents(BulkAction.NAME, Tuple::v1); assertEquals(1, topTask.size()); - assertEquals("requests[1], indices[test]", topTask.get(0).getDescription()); + assertEquals("requests[1], indices[test]", topTask.get(0).description()); // we should also get 1 or 2 [s] operation with main operation as a parent // in case the primary is located on the coordinating node we will have 1 operation, otherwise - 2 @@ -315,7 +315,7 @@ public void testTransportBulkTasks() { // and it should have the main task as a parent assertParentTask(shardTask, findEvents(BulkAction.NAME, Tuple::v1).get(0)); } else { - if (shardTasks.get(0).getParentTaskId().equals(shardTasks.get(1).getTaskId())) { + if (shardTasks.get(0).parentTaskId().equals(shardTasks.get(1).taskId())) { // task 1 is the parent of task 0, that means that task 0 will control [s][p] and [s][r] tasks shardTask = shardTasks.get(0); // in turn the parent of the task 1 should be the main task @@ -327,7 +327,7 @@ public void 
testTransportBulkTasks() { assertParentTask(shardTasks.get(0), findEvents(BulkAction.NAME, Tuple::v1).get(0)); } } - assertThat(shardTask.getDescription(), startsWith("requests[1], index[test][")); + assertThat(shardTask.description(), startsWith("requests[1], index[test][")); // we should also get one [s][p] operation with shard operation as a parent assertEquals(1, numberOfEvents(BulkAction.NAME + "[s][p]", Tuple::v1)); @@ -351,7 +351,7 @@ public void testSearchTaskDescriptions() { .get(); Map headers = new HashMap<>(); - headers.put(Task.X_OPAQUE_ID, "my_id"); + headers.put(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id"); headers.put("Foo-Header", "bar"); headers.put("Custom-Task-Header", "my_value"); assertSearchResponse(client().filterWithHeader(headers).prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).get()); @@ -359,38 +359,37 @@ public void testSearchTaskDescriptions() { // the search operation should produce one main task List mainTask = findEvents(SearchAction.NAME, Tuple::v1); assertEquals(1, mainTask.size()); - assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], search_type[")); - assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\"")); + assertThat(mainTask.get(0).description(), startsWith("indices[test], search_type[")); + assertThat(mainTask.get(0).description(), containsString("\"query\":{\"match_all\"")); assertTaskHeaders(mainTask.get(0)); // check that if we have any shard-level requests they all have non-zero length description List shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1); for (TaskInfo taskInfo : shardTasks) { - assertThat(taskInfo.getParentTaskId(), notNullValue()); - assertEquals(mainTask.get(0).getTaskId(), taskInfo.getParentTaskId()); + assertThat(taskInfo.parentTaskId(), notNullValue()); + assertEquals(mainTask.get(0).taskId(), taskInfo.parentTaskId()); assertTaskHeaders(taskInfo); - switch (taskInfo.getAction()) { - case SearchTransportService.QUERY_ACTION_NAME: - case SearchTransportService.DFS_ACTION_NAME: - assertTrue(taskInfo.getDescription(), Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription())); - break; - case SearchTransportService.QUERY_ID_ACTION_NAME: - assertTrue(taskInfo.getDescription(), Regex.simpleMatch("id[*], indices[test]", taskInfo.getDescription())); - break; - case SearchTransportService.FETCH_ID_ACTION_NAME: - assertTrue( - taskInfo.getDescription(), - Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]", taskInfo.getDescription()) - ); - break; - case SearchTransportService.QUERY_CAN_MATCH_NAME: - assertTrue(taskInfo.getDescription(), Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription())); - break; - default: - fail("Unexpected action [" + taskInfo.getAction() + "] with description [" + taskInfo.getDescription() + "]"); + switch (taskInfo.action()) { + case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.DFS_ACTION_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) + ); + case SearchTransportService.QUERY_ID_ACTION_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("id[*], indices[test]", taskInfo.description()) + ); + case SearchTransportService.FETCH_ID_ACTION_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]", taskInfo.description()) + ); + case SearchTransportService.QUERY_CAN_MATCH_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", 
taskInfo.description()) + ); + default -> fail("Unexpected action [" + taskInfo.action() + "] with description [" + taskInfo.description() + "]"); } // assert that all task descriptions have non-zero length - assertThat(taskInfo.getDescription().length(), greaterThan(0)); + assertThat(taskInfo.description().length(), greaterThan(0)); } } @@ -399,7 +398,7 @@ public void testSearchTaskHeaderLimit() { int maxSize = Math.toIntExact(SETTING_HTTP_MAX_HEADER_SIZE.getDefault(Settings.EMPTY).getBytes() / 2 + 1); Map headers = new HashMap<>(); - headers.put(Task.X_OPAQUE_ID, "my_id"); + headers.put(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id"); headers.put("Custom-Task-Header", randomAlphaOfLengthBetween(maxSize, maxSize + 100)); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -409,9 +408,9 @@ public void testSearchTaskHeaderLimit() { } private void assertTaskHeaders(TaskInfo taskInfo) { - assertThat(taskInfo.getHeaders().keySet(), hasSize(2)); - assertEquals("my_id", taskInfo.getHeaders().get(Task.X_OPAQUE_ID)); - assertEquals("my_value", taskInfo.getHeaders().get("Custom-Task-Header")); + assertThat(taskInfo.headers().keySet(), hasSize(2)); + assertEquals("my_id", taskInfo.headers().get(Task.X_OPAQUE_ID_HTTP_HEADER)); + assertEquals("my_value", taskInfo.headers().get("Custom-Task-Header")); } /** @@ -464,19 +463,19 @@ public void waitForTaskCompletion(Task task) {} .get(); assertThat(listResponse.getTasks(), not(empty())); for (TaskInfo task : listResponse.getTasks()) { - assertNotNull(task.getStatus()); - GetTaskResponse getResponse = client().admin().cluster().prepareGetTask(task.getTaskId()).get(); + assertNotNull(task.status()); + GetTaskResponse getResponse = client().admin().cluster().prepareGetTask(task.taskId()).get(); assertFalse("task should still be running", getResponse.getTask().isCompleted()); TaskInfo fetchedWithGet = getResponse.getTask().getTask(); - assertEquals(task.getId(), fetchedWithGet.getId()); - assertEquals(task.getType(), fetchedWithGet.getType()); - assertEquals(task.getAction(), fetchedWithGet.getAction()); - assertEquals(task.getDescription(), fetchedWithGet.getDescription()); - assertEquals(task.getStatus(), fetchedWithGet.getStatus()); - assertEquals(task.getStartTime(), fetchedWithGet.getStartTime()); - assertThat(fetchedWithGet.getRunningTimeNanos(), greaterThanOrEqualTo(task.getRunningTimeNanos())); - assertEquals(task.isCancellable(), fetchedWithGet.isCancellable()); - assertEquals(task.getParentTaskId(), fetchedWithGet.getParentTaskId()); + assertEquals(task.id(), fetchedWithGet.id()); + assertEquals(task.type(), fetchedWithGet.type()); + assertEquals(task.action(), fetchedWithGet.action()); + assertEquals(task.description(), fetchedWithGet.description()); + assertEquals(task.status(), fetchedWithGet.status()); + assertEquals(task.startTime(), fetchedWithGet.startTime()); + assertThat(fetchedWithGet.runningTimeNanos(), greaterThanOrEqualTo(task.runningTimeNanos())); + assertEquals(task.cancellable(), fetchedWithGet.cancellable()); + assertEquals(task.parentTaskId(), fetchedWithGet.parentTaskId()); } } finally { letTaskFinish.countDown(); @@ -562,7 +561,7 @@ public void testListTasksWaitForCompletion() throws Exception { assertThat(response.getTaskFailures(), empty()); assertThat(response.getTasks(), hasSize(1)); TaskInfo task = response.getTasks().get(0); - assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.action()); } ); } @@ -577,7 +576,7 @@ public void 
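The rewritten switch above replaces fall-through case/break blocks with arrow labels, grouping QUERY_ACTION_NAME and DFS_ACTION_NAME into a single case. A self-contained sketch of the construct, with hypothetical action names:

public class SwitchArrowDemo {
    static String describe(String action) {
        // Arrow labels never fall through, so no break is needed, and several labels
        // can share one body by listing them with commas.
        return switch (action) {
            case "query", "dfs" -> "shard-level search phase";
            case "fetch" -> "fetch phase";
            default -> throw new IllegalArgumentException("Unexpected action [" + action + "]");
        };
    }

    public static void main(String[] args) {
        System.out.println(describe("query")); // shard-level search phase
        System.out.println(describe("dfs"));   // shard-level search phase
    }
}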
testGetTaskWaitForCompletionWithoutStoringResult() throws Exception assertNull(response.getTask().getResponse()); // But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete assertNotNull(response.getTask().getTask()); - assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().action()); } ); } @@ -592,7 +591,7 @@ public void testGetTaskWaitForCompletionWithStoringResult() throws Exception { assertEquals(0, response.getTask().getResponseAsMap().get("failure_count")); // The task's details should also be there assertNotNull(response.getTask().getTask()); - assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().action()); } ); } @@ -724,7 +723,7 @@ private TaskId waitForTestTaskStartOnAllNodes() throws Exception { }); List task = client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME).get().getTasks(); assertThat(task, hasSize(1)); - return task.get(0).getTaskId(); + return task.get(0).taskId(); } public void testTasksListWaitForNoTask() throws Exception { @@ -785,32 +784,32 @@ public void testTaskStoringSuccessfulResult() throws Exception { assertEquals(1, events.size()); TaskInfo taskInfo = events.get(0); - TaskId taskId = taskInfo.getTaskId(); + TaskId taskId = taskInfo.taskId(); TaskResult taskResult = client().admin().cluster().getTask(new GetTaskRequest().setTaskId(taskId)).get().getTask(); assertTrue(taskResult.isCompleted()); assertNull(taskResult.getError()); - assertEquals(taskInfo.getTaskId(), taskResult.getTask().getTaskId()); - assertEquals(taskInfo.getParentTaskId(), taskResult.getTask().getParentTaskId()); - assertEquals(taskInfo.getType(), taskResult.getTask().getType()); - assertEquals(taskInfo.getAction(), taskResult.getTask().getAction()); - assertEquals(taskInfo.getDescription(), taskResult.getTask().getDescription()); - assertEquals(taskInfo.getStartTime(), taskResult.getTask().getStartTime()); - assertEquals(taskInfo.getHeaders(), taskResult.getTask().getHeaders()); + assertEquals(taskInfo.taskId(), taskResult.getTask().taskId()); + assertEquals(taskInfo.parentTaskId(), taskResult.getTask().parentTaskId()); + assertEquals(taskInfo.type(), taskResult.getTask().type()); + assertEquals(taskInfo.action(), taskResult.getTask().action()); + assertEquals(taskInfo.description(), taskResult.getTask().description()); + assertEquals(taskInfo.startTime(), taskResult.getTask().startTime()); + assertEquals(taskInfo.headers(), taskResult.getTask().headers()); Map result = taskResult.getResponseAsMap(); assertEquals("0", result.get("failure_count").toString()); assertNoFailures(client().admin().indices().prepareRefresh(TaskResultsService.TASK_INDEX).get()); SearchResponse searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.getAction()))) + .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action()))) .get(); assertEquals(1L, searchResponse.getHits().getTotalHits().value); searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.getTaskId().getNodeId()))) + 
.setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId()))) .get(); assertEquals(1L, searchResponse.getHits().getTotalHits().value); @@ -841,18 +840,18 @@ public void testTaskStoringFailureResult() throws Exception { List events = findEvents(TestTaskPlugin.TestTaskAction.NAME, Tuple::v1); assertEquals(1, events.size()); TaskInfo failedTaskInfo = events.get(0); - TaskId failedTaskId = failedTaskInfo.getTaskId(); + TaskId failedTaskId = failedTaskInfo.taskId(); TaskResult taskResult = client().admin().cluster().getTask(new GetTaskRequest().setTaskId(failedTaskId)).get().getTask(); assertTrue(taskResult.isCompleted()); assertNull(taskResult.getResponse()); - assertEquals(failedTaskInfo.getTaskId(), taskResult.getTask().getTaskId()); - assertEquals(failedTaskInfo.getType(), taskResult.getTask().getType()); - assertEquals(failedTaskInfo.getAction(), taskResult.getTask().getAction()); - assertEquals(failedTaskInfo.getDescription(), taskResult.getTask().getDescription()); - assertEquals(failedTaskInfo.getStartTime(), taskResult.getTask().getStartTime()); - assertEquals(failedTaskInfo.getHeaders(), taskResult.getTask().getHeaders()); + assertEquals(failedTaskInfo.taskId(), taskResult.getTask().taskId()); + assertEquals(failedTaskInfo.type(), taskResult.getTask().type()); + assertEquals(failedTaskInfo.action(), taskResult.getTask().action()); + assertEquals(failedTaskInfo.description(), taskResult.getTask().description()); + assertEquals(failedTaskInfo.startTime(), taskResult.getTask().startTime()); + assertEquals(failedTaskInfo.headers(), taskResult.getTask().headers()); Map error = (Map) taskResult.getErrorAsMap(); assertEquals("Simulating operation failure", error.get("reason")); assertEquals("illegal_state_exception", error.get("type")); @@ -911,7 +910,7 @@ public void onFailure(Exception e) { // Now we can find it! 
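The failure-path assertions above read the stored error back as a map with "type" and "reason" keys ("illegal_state_exception" / "Simulating operation failure"). A JDK-only sketch of that shape; the snake_case conversion here is an assumption made for illustration, not the real ElasticsearchException serialization:

import java.util.Map;

public class ErrorAsMapDemo {
    static Map<String, Object> toMap(Exception e) {
        // Derive a snake_case type name from the exception class, mirroring the two
        // fields the test reads from taskResult.getErrorAsMap().
        String type = e.getClass().getSimpleName().replaceAll("([a-z])([A-Z])", "$1_$2").toLowerCase();
        return Map.of("type", type, "reason", String.valueOf(e.getMessage()));
    }

    public static void main(String[] args) {
        Map<String, Object> error = toMap(new IllegalStateException("Simulating operation failure"));
        // Contains type=illegal_state_exception and reason=Simulating operation failure (map order unspecified).
        System.out.println(error);
    }
}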
GetTaskResponse response = expectFinishedTask(new TaskId("fake:1")); - assertEquals("test", response.getTask().getTask().getAction()); + assertEquals("test", response.getTask().getTask().action()); assertNotNull(response.getTask().getError()); assertNull(response.getTask().getResponse()); } @@ -991,10 +990,10 @@ private void assertParentTask(List tasks, TaskInfo parentTask) { } private void assertParentTask(TaskInfo task, TaskInfo parentTask) { - assertTrue(task.getParentTaskId().isSet()); - assertEquals(parentTask.getTaskId().getNodeId(), task.getParentTaskId().getNodeId()); - assertTrue(Strings.hasLength(task.getParentTaskId().getNodeId())); - assertEquals(parentTask.getId(), task.getParentTaskId().getId()); + assertTrue(task.parentTaskId().isSet()); + assertEquals(parentTask.taskId().getNodeId(), task.parentTaskId().getNodeId()); + assertTrue(Strings.hasLength(task.parentTaskId().getNodeId())); + assertEquals(parentTask.id(), task.parentTaskId().getId()); } private void expectNotFound(ThrowingRunnable r) { @@ -1013,8 +1012,8 @@ private GetTaskResponse expectFinishedTask(TaskId taskId) throws IOException { GetTaskResponse response = client().admin().cluster().prepareGetTask(taskId).get(); assertTrue("the task should have been completed before fetching", response.getTask().isCompleted()); TaskInfo info = response.getTask().getTask(); - assertEquals(taskId, info.getTaskId()); - assertNull(info.getStatus()); // The test task doesn't have any status + assertEquals(taskId, info.taskId()); + assertNull(info.status()); // The test task doesn't have any status return response; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 07201d4286963..9d4b23e1206b4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Priority; @@ -254,15 +254,26 @@ public void testFieldTypes() { assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertTrue(response.getIndicesStats().getMappings().getFieldTypeStats().isEmpty()); - client().admin().indices().prepareCreate("test1").setMapping("{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}").get(); - client().admin() - .indices() - .prepareCreate("test2") - .setMapping( - "{\"properties\":{\"foo\":{\"type\": \"keyword\"},\"bar\":{\"properties\":{\"baz\":{\"type\":\"keyword\"}," - + "\"eggplant\":{\"type\":\"integer\"}}}}}" - ) - .get(); + client().admin().indices().prepareCreate("test1").setMapping(""" + {"properties":{"foo":{"type": "keyword"}}}""").get(); + client().admin().indices().prepareCreate("test2").setMapping(""" + { + "properties": { + "foo": { + "type": "keyword" + }, + "bar": { + "properties": { + "baz": { + "type": "keyword" + }, + "eggplant": { + "type": "integer" + } + } + } + } + }""").get(); response = 
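The prepareCreate(...).setMapping(...) calls in ClusterStatsIT above now pass Java text blocks, so the embedded JSON mapping needs no quote escaping. A standalone sketch of the before/after:

public class TextBlockDemo {
    public static void main(String[] args) {
        // Before: escaped, single-line JSON.
        String escaped = "{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}";
        // After: a text block keeps the quotes and structure readable.
        String block = """
            {"properties":{"foo":{"type": "keyword"}}}""";
        System.out.println(escaped.equals(block)); // true
    }
}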
client().admin().cluster().prepareClusterStats().get(); assertThat(response.getIndicesStats().getMappings().getFieldTypeStats().size(), equalTo(3)); Set stats = response.getIndicesStats().getMappings().getFieldTypeStats(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java index 337d1e819583e..c97c4fc5b6892 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java @@ -17,7 +17,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @@ -49,7 +48,7 @@ public void testClearIndicesCacheWithBlocks() { } } // Request is blocked - for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA, SETTING_READ_ONLY_ALLOW_DELETE)) { + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); assertBlocked( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index 967e20f9be2ba..bda0af2445c4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -8,24 +8,58 @@ package org.elasticsearch.action.admin.indices.create; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.TestSystemIndexPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.snapshots.SystemIndicesSnapshotIT; import org.elasticsearch.test.ESIntegTestCase; +import org.junit.After; import java.util.Collection; +import java.util.Collections; +import java.util.List; +import 
java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_HIDDEN;
 import static org.elasticsearch.indices.TestSystemIndexDescriptor.INDEX_NAME;
 import static org.elasticsearch.indices.TestSystemIndexDescriptor.PRIMARY_INDEX_NAME;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;

 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class AutoCreateSystemIndexIT extends ESIntegTestCase {

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return CollectionUtils.appendToCopy(super.nodePlugins(), TestSystemIndexPlugin.class);
+        return CollectionUtils.appendToCopy(
+            CollectionUtils.appendToCopy(super.nodePlugins(), TestSystemIndexPlugin.class),
+            UnmanagedSystemIndexTestPlugin.class
+        );
+    }
+
+    @After
+    public void afterEach() {
+        assertAcked(client().admin().indices().prepareDeleteTemplate("*").get());
+        client().admin().indices().prepareDelete(PRIMARY_INDEX_NAME).get();
     }

     public void testAutoCreatePrimaryIndex() throws Exception {
@@ -51,4 +85,132 @@ public void testAutoCreateNonPrimaryIndex() throws Exception {
         GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices(INDEX_NAME + "-2").get();
         assertThat(response.indices().length, is(1));
     }
+
+    public void testSystemIndicesAutoCreatedAsHidden() throws Exception {
+        CreateIndexRequest request = new CreateIndexRequest(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
+        client().execute(AutoCreateAction.INSTANCE, request).get();
+
+        GetIndexResponse response = client().admin()
+            .indices()
+            .prepareGetIndex()
+            .addIndices(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME)
+            .get();
+        assertThat(response.indices().length, is(1));
+        Settings settings = response.settings().get(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
+        assertThat(settings, notNullValue());
+        assertThat(settings.getAsBoolean(SETTING_INDEX_HIDDEN, false), is(true));
+    }
+
+    public void testSystemIndicesAutoCreateRejectedWhenNotHidden() {
+        CreateIndexRequest request = new CreateIndexRequest(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
+        request.settings(Settings.builder().put(SETTING_INDEX_HIDDEN, false).build());
+        ExecutionException exception = expectThrows(
+            ExecutionException.class,
+            () -> client().execute(AutoCreateAction.INSTANCE, request).get()
+        );
+
+        assertThat(
+            exception.getCause().getMessage(),
+            containsString("Cannot auto-create system index [.unmanaged-system-idx] with [index.hidden] set to 'false'")
+        );
+    }
+
+    /**
+     * Check that a template applying a system alias creates a hidden alias.
+ */ + public void testAutoCreateSystemAliasViaV1Template() throws Exception { + assertAcked( + client().admin() + .indices() + .preparePutTemplate("test-template") + .setPatterns(List.of(INDEX_NAME + "*")) + .addAlias(new Alias(INDEX_NAME + "-legacy-alias")) + .get() + ); + + String nonPrimaryIndex = INDEX_NAME + "-2"; + CreateIndexRequest request = new CreateIndexRequest(nonPrimaryIndex); + assertAcked(client().execute(AutoCreateAction.INSTANCE, request).get()); + + assertTrue(indexExists(nonPrimaryIndex)); + + assertAliasesHidden(nonPrimaryIndex, Set.of(".test-index", ".test-index-legacy-alias")); + + assertAcked(client().admin().indices().prepareDeleteTemplate("*").get()); + } + + /** + * Check that a composable template applying a system alias creates a hidden alias. + */ + public void testAutoCreateSystemAliasViaComposableTemplate() throws Exception { + ComposableIndexTemplate cit = new ComposableIndexTemplate( + Collections.singletonList(INDEX_NAME + "*"), + new Template( + null, + null, + Map.of(INDEX_NAME + "-composable-alias", AliasMetadata.builder(INDEX_NAME + "-composable-alias").build()) + ), + Collections.emptyList(), + 4L, + 5L, + Collections.emptyMap() + ); + assertAcked( + client().execute( + PutComposableIndexTemplateAction.INSTANCE, + new PutComposableIndexTemplateAction.Request("test-composable-template").indexTemplate(cit) + ).get() + ); + + String nonPrimaryIndex = INDEX_NAME + "-2"; + CreateIndexRequest request = new CreateIndexRequest(nonPrimaryIndex); + assertAcked(client().execute(AutoCreateAction.INSTANCE, request).get()); + + assertTrue(indexExists(nonPrimaryIndex)); + + assertAliasesHidden(nonPrimaryIndex, Set.of(".test-index", ".test-index-composable-alias")); + + assertAcked( + client().execute( + DeleteComposableIndexTemplateAction.INSTANCE, + new DeleteComposableIndexTemplateAction.Request("test-composable-template") + ).get() + ); + } + + private void assertAliasesHidden(String nonPrimaryIndex, Set aliasNames) throws InterruptedException, ExecutionException { + final GetAliasesResponse getAliasesResponse = client().admin() + .indices() + .getAliases(new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden())) + .get(); + + assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); + assertThat(getAliasesResponse.getAliases().get(nonPrimaryIndex).size(), equalTo(2)); + assertThat( + getAliasesResponse.getAliases().get(nonPrimaryIndex).stream().map(AliasMetadata::alias).collect(Collectors.toSet()), + equalTo(aliasNames) + ); + getAliasesResponse.getAliases().get(nonPrimaryIndex).forEach(alias -> assertThat(alias.isHidden(), is(true))); + } + + public static class UnmanagedSystemIndexTestPlugin extends Plugin implements SystemIndexPlugin { + + public static final String SYSTEM_INDEX_NAME = ".unmanaged-system-idx"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + return Collections.singletonList(new SystemIndexDescriptor(SYSTEM_INDEX_NAME + "*", "System indices for tests")); + } + + @Override + public String getFeatureName() { + return SystemIndicesSnapshotIT.SystemIndexTestPlugin.class.getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "A simple test plugin"; + } + } + } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java index 41a346696417e..1331c2e50c1e7 100644 --- 
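The assertAliasesHidden helper above streams the alias metadata into a set of names and requires every alias on the auto-created index to be hidden. The same check in a JDK-only sketch, with a hypothetical AliasView record standing in for AliasMetadata:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class HiddenAliasCheckDemo {
    record AliasView(String alias, boolean isHidden) {}

    public static void main(String[] args) {
        List<AliasView> aliases = List.of(
            new AliasView(".test-index", true),
            new AliasView(".test-index-composable-alias", true)
        );
        // Mirror the two assertions: the expected name set, and the hidden flag on each alias.
        Set<String> names = aliases.stream().map(AliasView::alias).collect(Collectors.toSet());
        boolean allHidden = aliases.stream().allMatch(AliasView::isHidden);
        System.out.println(names.equals(Set.of(".test-index", ".test-index-composable-alias")) && allHidden);
    }
}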
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java @@ -8,11 +8,25 @@ package org.elasticsearch.action.admin.indices.create; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -21,12 +35,19 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; +import org.junit.After; import org.junit.Before; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.indices.TestSystemIndexDescriptor.INDEX_NAME; import static org.elasticsearch.indices.TestSystemIndexDescriptor.PRIMARY_INDEX_NAME; @@ -42,6 +63,14 @@ public void beforeEach() { TestSystemIndexDescriptor.useNewMappings.set(false); } + @After + public void afterEach() throws Exception { + assertAcked(client().admin().indices().prepareDeleteTemplate("*").get()); + assertAcked( + client().execute(DeleteComposableIndexTemplateAction.INSTANCE, new DeleteComposableIndexTemplateAction.Request("*")).get() + ); + } + @Override protected Collection> nodePlugins() { return CollectionUtils.appendToCopy(super.nodePlugins(), TestSystemIndexPlugin.class); @@ -91,6 +120,70 @@ public void testCreateSystemIndexViaConcreteName() { doCreateTest(() -> assertAcked(prepareCreate(PRIMARY_INDEX_NAME)), PRIMARY_INDEX_NAME); } + /** + * Check that a legacy template applying a system alias creates a hidden alias. 
+ */ + public void testCreateSystemAliasViaV1Template() throws Exception { + assertAcked( + client().admin() + .indices() + .preparePutTemplate("test-template") + .setPatterns(List.of(INDEX_NAME + "*")) + .addAlias(new Alias(INDEX_NAME + "-legacy-alias")) + .get() + ); + + assertAcked(prepareCreate(INDEX_NAME + "-2")); + ensureGreen(PRIMARY_INDEX_NAME); + + assertTrue(indexExists(PRIMARY_INDEX_NAME)); + assertFalse(indexExists(INDEX_NAME + "-2")); + + assertHasAliases(Set.of(".test-index", ".test-index-legacy-alias")); + + assertAcked(client().admin().indices().prepareDeleteTemplate("*").get()); + } + + /** + * Check that a composable template applying a system alias creates a hidden alias. + */ + public void testCreateSystemAliasViaComposableTemplate() throws Exception { + ComposableIndexTemplate cit = new ComposableIndexTemplate( + Collections.singletonList(INDEX_NAME + "*"), + new Template( + null, + null, + Map.of(INDEX_NAME + "-composable-alias", AliasMetadata.builder(INDEX_NAME + "-composable-alias").build()) + ), + Collections.emptyList(), + 4L, + 5L, + Collections.emptyMap() + ); + assertAcked( + client().execute( + PutComposableIndexTemplateAction.INSTANCE, + new PutComposableIndexTemplateAction.Request("test-composable-template").indexTemplate(cit) + ).get() + ); + + assertAcked(prepareCreate(INDEX_NAME + "-2")); + ensureGreen(PRIMARY_INDEX_NAME); + + // Attempting to directly create a non-primary system index only creates the primary index + assertTrue(indexExists(PRIMARY_INDEX_NAME)); + assertFalse(indexExists(INDEX_NAME + "-2")); + + assertHasAliases(Set.of(".test-index", ".test-index-composable-alias")); + + assertAcked( + client().execute( + DeleteComposableIndexTemplateAction.INSTANCE, + new DeleteComposableIndexTemplateAction.Request("test-composable-template") + ).get() + ); + } + private void doCreateTest(Runnable runnable, String concreteIndex) { internalCluster().startNodes(1); @@ -112,6 +205,63 @@ private void doCreateTest(Runnable runnable, String concreteIndex) { ensureGreen(INDEX_NAME); assertMappingsAndSettings(TestSystemIndexDescriptor.getNewMappings(), concreteIndex); + assertAliases(concreteIndex); + } + + public void testConcurrentAutoCreates() throws InterruptedException { + internalCluster().startNodes(3); + + final Client client = client(); + final int count = randomIntBetween(5, 30); + final CountDownLatch latch = new CountDownLatch(count); + final ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(BulkResponse o) { + latch.countDown(); + assertFalse(o.hasFailures()); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + throw new AssertionError(e); + } + }; + for (int i = 0; i < count; i++) { + client.bulk(new BulkRequest().add(new IndexRequest(INDEX_NAME).source(Map.of("foo", "bar"))), listener); + } + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + /** + * Make sure that aliases are created hidden + */ + private void assertAliases(String concreteIndex) { + final GetAliasesResponse getAliasesResponse = client().admin() + .indices() + .getAliases(new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden())) + .actionGet(); + + assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); + assertThat(getAliasesResponse.getAliases().get(concreteIndex).size(), equalTo(1)); + assertThat(getAliasesResponse.getAliases().get(concreteIndex).get(0).isHidden(), equalTo(true)); + } + + private void assertHasAliases(Set aliasNames) throws InterruptedException, 
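testConcurrentAutoCreates above fires a batch of bulk requests and counts each completion, success or failure, on a CountDownLatch before asserting. A self-contained sketch of that completion-counting pattern, with a hypothetical Listener interface standing in for ActionListener:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class LatchListenerDemo {
    interface Listener<T> {
        void onResponse(T value);
        void onFailure(Exception e);
    }

    public static void main(String[] args) throws InterruptedException {
        int count = 20;
        CountDownLatch latch = new CountDownLatch(count);
        // Count down on both callbacks so a failure can never hang the waiting thread.
        Listener<String> listener = new Listener<>() {
            @Override
            public void onResponse(String value) {
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                latch.countDown();
            }
        };
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < count; i++) {
            int n = i;
            pool.execute(() -> listener.onResponse("request-" + n));
        }
        // Bounded wait, as in the test's latch.await(30, TimeUnit.SECONDS).
        System.out.println("all completed: " + latch.await(30, TimeUnit.SECONDS));
        pool.shutdown();
    }
}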
java.util.concurrent.ExecutionException { + final GetAliasesResponse getAliasesResponse = client().admin() + .indices() + .getAliases(new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden())) + .get(); + + // Attempting to directly create a non-primary system index only creates the primary index + assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); + assertThat(getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).size(), equalTo(2)); + assertThat( + getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).stream().map(AliasMetadata::alias).collect(Collectors.toSet()), + equalTo(aliasNames) + ); + assertThat(getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).get(0).isHidden(), equalTo(true)); + assertThat(getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).get(1).isHidden(), equalTo(true)); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index e7502fb246012..c68908918970e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -632,7 +632,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { IndexMetadata target = clusterStateResponse.getState().getMetadata().index("target"); client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get(); IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get(); - ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0]; + ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).shards()[0]; assertTrue(segmentsStats.getNumberOfCommitted() > 0); assertNotEquals(segmentsStats.getSegments(), segmentsStats.getNumberOfCommitted()); @@ -648,7 +648,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { } assertBusy(() -> { IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get(); - ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0]; + ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).shards()[0]; Map source = sourceStats.getIndices().get("source").getShards(); int numSourceSegments = 0; for (IndexShardSegments s : source.values()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index eb1267c6beb8a..8e91fc0ae0926 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -26,7 +26,7 @@ import 
org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java index a32695e586c54..64a365ea2b3e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java @@ -15,11 +15,21 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; +import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; public class DeleteIndexBlocksIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) // we control the read-only-allow-delete block + .build(); + } + public void testDeleteIndexWithBlocks() { createIndex("test"); ensureGreen("test"); @@ -44,10 +54,6 @@ public void testDeleteIndexOnIndexReadOnlyAllowDeleteSetting() { client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK ); - assertBlocked( - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 2)), - IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK - ); assertSearchHits(client().prepareSearch().get(), "1"); assertAcked(client().admin().indices().prepareDelete("test")); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java index eef3300b3faf6..590f84849095b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -18,6 +19,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; +import static 
org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @@ -25,6 +27,14 @@ @ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class ForceMergeBlocksIT extends ESIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) // we control the read-only-allow-delete block + .build(); + } + public void testForceMergeWithBlocks() { createIndex("test"); ensureGreen("test"); @@ -37,7 +47,7 @@ public void testForceMergeWithBlocks() { } // Request is not blocked - for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test", blockSetting); ForceMergeResponse response = client().admin().indices().prepareForceMerge("test").execute().actionGet(); @@ -49,7 +59,7 @@ public void testForceMergeWithBlocks() { } // Request is blocked - for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA, SETTING_READ_ONLY_ALLOW_DELETE)) { + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); assertBlocked(client().admin().indices().prepareForceMerge("test")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index 4c08fe4ec367c..b3d5dbada7f62 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.admin.indices.get; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.support.IndicesOptions; @@ -23,6 +21,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_METADATA_BLOCK; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; @@ -287,8 +286,8 @@ private void assertEmptyMappings(GetIndexResponse response) { private void assertEmptyAliases(GetIndexResponse response) { assertThat(response.aliases(), notNullValue()); - for (final ObjectObjectCursor> entry : response.getAliases()) { - assertTrue(entry.value.isEmpty()); + for (final Map.Entry> entry : response.getAliases().entrySet()) { + assertTrue(entry.getValue().isEmpty()); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 35acad2af85c5..7e12e06e00ce3 100644 --- 
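GetIndexIT above and IndicesShardStoreRequestIT just below swap the hppc cursor loops (ObjectObjectCursor, IntObjectCursor) for plain java.util.Map.Entry iteration, with entry.getKey()/getValue() replacing cursor.key/cursor.value. A JDK-only sketch of the replacement idiom:

import java.util.List;
import java.util.Map;

public class EntrySetDemo {
    public static void main(String[] args) {
        Map<Integer, List<String>> shardStores = Map.of(
            0, List.of("node-a", "node-b"),
            1, List.of("node-c")
        );
        // entry.getKey()/getValue() take over from the cursor's public .key/.value fields.
        for (Map.Entry<Integer, List<String>> entry : shardStores.entrySet()) {
            System.out.println("shard [" + entry.getKey() + "] has stores " + entry.getValue());
        }
    }
}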
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -8,12 +8,9 @@ package org.elasticsearch.action.admin.indices.shards; -import com.carrotsearch.hppc.cursors.IntObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -81,8 +78,8 @@ public void testBasic() throws Exception { assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); ImmutableOpenIntMap> shardStores = response.getStoreStatuses().get(index); assertThat(shardStores.values().size(), equalTo(2)); - for (ObjectCursor> shardStoreStatuses : shardStores.values()) { - for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) { + for (Map.Entry> shardStoreStatuses : shardStores.entrySet()) { + for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.getValue()) { assertThat(storeStatus.getAllocationId(), notNullValue()); assertThat(storeStatus.getNode(), notNullValue()); assertThat(storeStatus.getStoreException(), nullValue()); @@ -103,11 +100,11 @@ public void testBasic() throws Exception { assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); ImmutableOpenIntMap> shardStoresStatuses = response.getStoreStatuses().get(index); assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size())); - for (IntObjectCursor> storesStatus : shardStoresStatuses) { - assertThat("must report for one store", storesStatus.value.size(), equalTo(1)); + for (Map.Entry> storesStatus : shardStoresStatuses.entrySet()) { + assertThat("must report for one store", storesStatus.getValue().size(), equalTo(1)); assertThat( "reported store should be primary", - storesStatus.value.get(0).getAllocationStatus(), + storesStatus.getValue().get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY) ); } @@ -187,18 +184,18 @@ public void testCorruptedShards() throws Exception { ImmutableOpenIntMap> shardStatuses = rsp.getStoreStatuses().get(index); assertNotNull(shardStatuses); assertThat(shardStatuses.size(), greaterThan(0)); - for (IntObjectCursor> shardStatus : shardStatuses) { - for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) { - if (corruptedShardIDMap.containsKey(shardStatus.key) - && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().getName())) { + for (Map.Entry> shardStatus : shardStatuses.entrySet()) { + for (IndicesShardStoresResponse.StoreStatus status : shardStatus.getValue()) { + if (corruptedShardIDMap.containsKey(shardStatus.getKey()) + && corruptedShardIDMap.get(shardStatus.getKey()).contains(status.getNode().getName())) { assertThat( - "shard [" + shardStatus.key + "] is failed on node [" + status.getNode().getName() + "]", + "shard [" + shardStatus.getKey() + "] is failed on node [" + status.getNode().getName() + "]", status.getStoreException(), notNullValue() ); } else { assertNull( - "shard [" + shardStatus.key + "] is not failed on node [" + status.getNode().getName() + "]", + "shard [" + shardStatus.getKey() + 
"] is not failed on node [" + status.getNode().getName() + "]", status.getStoreException() ); } @@ -235,11 +232,11 @@ private Set findNodesWithShard(String index) { ClusterState state = client().admin().cluster().prepareState().get().getState(); IndexRoutingTable indexRoutingTable = state.routingTable().index(index); List startedShards = indexRoutingTable.shardsWithState(ShardRoutingState.STARTED); - Set nodesWithShard = new HashSet<>(); + Set nodesNamesWithShard = new HashSet<>(); for (ShardRouting startedShard : startedShards) { - nodesWithShard.add(state.nodes().get(startedShard.currentNodeId()).getName()); + nodesNamesWithShard.add(state.nodes().get(startedShard.currentNodeId()).getName()); } - return nodesWithShard; + return nodesNamesWithShard; } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java index 1c4a6be5c6c6b..035d807834798 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java @@ -14,8 +14,8 @@ import org.elasticsearch.action.get.MultiGetRequestBuilder; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index ec45fdf506f2c..8267dc2319591 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -100,8 +100,7 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) // validate all responses boolean rejectedAfterAllRetries = false; for (Object response : responses) { - if (response instanceof BulkResponse) { - BulkResponse bulkResponse = (BulkResponse) response; + if (response instanceof BulkResponse bulkResponse) { for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) { if (bulkItemResponse.isFailed()) { BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 4c26db470ef98..77479df262ebe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java index 73246ae3e2b67..d2fdb248f7b4b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index 05562a8518d73..606e023b6493a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.search.builder.PointInTimeBuilder; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index d3a5a24dd731c..32c8ee1d6ec8a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -8,13 +8,13 @@ package org.elasticsearch.action.search; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -241,21 +241,38 @@ public void testIndexNotFound() { } refresh(); String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); - SearchResponse resp1 = client().prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)).get(); - assertNoFailures(resp1); - assertHitCount(resp1, index1 + index2); - client().admin().indices().prepareDelete("index-1").get(); - if (randomBoolean()) { - SearchResponse resp2 = client().prepareSearch("index-*").get(); - assertNoFailures(resp2); - assertHitCount(resp2, index2); + try { + SearchResponse resp = client().prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)).get(); + assertNoFailures(resp); + assertHitCount(resp, index1 + index2); + client().admin().indices().prepareDelete("index-1").get(); + if (randomBoolean()) { + resp = client().prepareSearch("index-*").get(); + 
assertNoFailures(resp); + assertHitCount(resp, index2); + } + + // Allow partial search result + resp = client().prepareSearch() + .setPreference(null) + .setAllowPartialSearchResults(true) + .setPointInTime(new PointInTimeBuilder(pit)) + .get(); + assertFailures(resp); + assertHitCount(resp, index2); + // Do not allow partial search result + expectThrows( + ElasticsearchException.class, + () -> client().prepareSearch() + .setPreference(null) + .setAllowPartialSearchResults(false) + .setPointInTime(new PointInTimeBuilder(pit)) + .get() + ); + } finally { + closePointInTime(pit); } - expectThrows( - IndexNotFoundException.class, - () -> client().prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)).get() - ); - closePointInTime(resp1.pointInTimeId()); } public void testCanMatch() throws Exception { @@ -263,10 +280,8 @@ public void testCanMatch() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(5, 10)) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.timeValueMillis(randomIntBetween(50, 100))); - assertAcked( - prepareCreate("test").setSettings(settings) - .setMapping("{\"properties\":{\"created_date\":{\"type\": \"date\", \"format\": \"yyyy-MM-dd\"}}}") - ); + assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" + {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); String pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index b704530f6f8a2..5b333a4ebae93 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -10,8 +10,8 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -107,12 +107,12 @@ private void testCase(NodeClient client, SearchRequest request, List shards, + List searchShards, List skippedShards, SearchResponse.Clusters clusters, boolean fetchPhase ) { - shardsListener.set(shards); + shardsListener.set(searchShards); assertEquals(fetchPhase, hasFetchPhase); } @@ -141,12 +141,12 @@ public void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Except } @Override - public void onPartialReduce(List shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { + public void onPartialReduce(List searchShards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { numReduces.incrementAndGet(); } @Override - public void onFinalReduce(List shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { + public void onFinalReduce(List searchShards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { numReduces.incrementAndGet(); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index 092d154e2bede..e2a8bd3667e65 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -420,10 +420,8 @@ public void testSearchIdle() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.timeValueMillis(randomIntBetween(50, 500))); - assertAcked( - prepareCreate("test").setSettings(settings) - .setMapping("{\"properties\":{\"created_date\":{\"type\": \"date\", \"format\": \"yyyy-MM-dd\"}}}") - ); + assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" + {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); assertBusy(() -> { for (String node : internalCluster().nodesInclude("test")) { @@ -672,7 +670,7 @@ public Aggregator parent() { } @Override - public Aggregator subAggregator(String name) { + public Aggregator subAggregator(String aggregatorName) { return null; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index ced597dc4ca7a..1b19e8dd83e08 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -126,6 +126,8 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { } private String source(String id, String nameValue) { - return "{ \"type1\" : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }"; + return """ + { "type1" : { "id" : "%s", "name" : "%s" } } + """.formatted(id, nameValue); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 29bb176646e26..f2c4d1b2fa5c9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -260,32 +260,26 @@ public void testRandomSingleTermVectors() throws IOException { boolean storeOffsets = false; boolean storeTermVectors = false; switch (config) { - case 0: { + case 0 -> { // do nothing - break; } - case 1: { + case 1 -> { storeTermVectors = true; - break; } - case 2: { + case 2 -> { storeTermVectors = true; storePositions = true; - break; } - case 3: { + case 3 -> { storeTermVectors = true; storeOffsets = true; - break; } - case 4: { + case 4 -> { storeTermVectors = true; storePositions = true; storeOffsets = true; - break; 
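The source(id, nameValue) helper in WaitActiveShardCountIT above combines a text block with String.formatted, replacing the old escaped-quote concatenation. A standalone sketch of the same pattern:

public class FormattedTextBlockDemo {
    static String source(String id, String nameValue) {
        // %s placeholders are filled in by formatted(), so the JSON needs no escapes.
        return """
            { "type1" : { "id" : "%s", "name" : "%s" } }
            """.formatted(id, nameValue);
    }

    public static void main(String[] args) {
        System.out.print(source("1", "kimchy"));
    }
}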
} - default: - throw new IllegalArgumentException("Unsupported option: " + config); + default -> throw new IllegalArgumentException("Unsupported option: " + config); } ft.setStoreTermVectors(storeTermVectors); ft.setStoreTermVectorOffsets(storeOffsets); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index c61a4cfeb9acd..cb5a064c5e262 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.aliases; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; @@ -46,15 +44,16 @@ import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.client.Requests.createIndexRequest; -import static org.elasticsearch.client.Requests.deleteRequest; -import static org.elasticsearch.client.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.createIndexRequest; +import static org.elasticsearch.client.internal.Requests.deleteRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_METADATA_BLOCK; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_BLOCK; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; @@ -213,10 +212,8 @@ public void testFilteringAliases() throws Exception { logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]"); ClusterState clusterState = admin().cluster().prepareState().get().getState(); IndexMetadata indexMd = clusterState.metadata().index("test"); - assertThat( - indexMd.getAliases().get("alias1").filter().string(), - equalTo("{\"term\":{\"user\":{\"value\":\"kimchy\",\"boost\":1.0}}}") - ); + assertThat(indexMd.getAliases().get("alias1").filter().string(), equalTo(""" + {"term":{"user":{"value":"kimchy","boost":1.0}}}""")); } @@ -801,7 +798,8 @@ public void testSameAlias() { Metadata metadata = internalCluster().clusterService().state().metadata(); IndexAbstraction ia = metadata.getIndicesLookup().get("alias1"); AliasMetadata aliasMetadata = AliasMetadata.getFirstAliasMetadata(metadata, ia); - assertThat(aliasMetadata.getFilter().toString(), equalTo("{\"term\":{\"name\":{\"value\":\"bar\",\"boost\":1.0}}}")); + assertThat(aliasMetadata.getFilter().toString(), equalTo(""" + {"term":{"name":{"value":"bar","boost":1.0}}}""")); logger.info("--> deleting alias1"); stopWatch.start(); @@ -1018,8 +1016,8 @@ public void testIndicesGetAliases() throws Exception { assertAcked(admin().indices().prepareAliases().removeAlias("foobar", "foo")); getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get(); - for (final ObjectObjectCursor<String, List<AliasMetadata>> entry : getResponse.getAliases()) { - assertTrue(entry.value.isEmpty()); + for (final Map.Entry<String, List<AliasMetadata>> entry : getResponse.getAliases().entrySet()) {
+ assertTrue(entry.getValue().isEmpty()); } assertTrue(admin().indices().prepareGetAliases("foo").addIndices("foobar").get().getAliases().isEmpty()); } @@ -1051,35 +1049,27 @@ public void testCreateIndexWithAliases() { } public void testCreateIndexWithAliasesInSource() throws Exception { - assertAcked( - prepareCreate("test").setSource( - "{\n" - + " \"aliases\" : {\n" - + " \"alias1\" : {},\n" - + " \"alias2\" : {\"filter\" : {\"match_all\": {}}},\n" - + " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"},\n" - + " \"alias4\" : {\"is_hidden\": true}\n" - + " }\n" - + "}", - XContentType.JSON - ) - ); + assertAcked(prepareCreate("test").setSource(""" + { + "aliases" : { + "alias1" : {}, + "alias2" : {"filter" : {"match_all": {}}}, + "alias3" : { "index_routing" : "index", "search_routing" : "search"}, + "alias4" : {"is_hidden": true} + } + }""", XContentType.JSON)); checkAliases(); } public void testCreateIndexWithAliasesSource() { - assertAcked( - prepareCreate("test").setMapping("field", "type=text") - .setAliases( - "{\n" - + " \"alias1\" : {},\n" - + " \"alias2\" : {\"filter\" : {\"term\": {\"field\":\"value\"}}},\n" - + " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"},\n" - + " \"alias4\" : {\"is_hidden\": true}\n" - + "}" - ) - ); + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases(""" + { + "alias1" : {}, + "alias2" : {"filter" : {"term": {"field":"value"}}}, + "alias3" : { "index_routing" : "index", "search_routing" : "search"}, + "alias4" : {"is_hidden": true} + }""")); checkAliases(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java new file mode 100644 index 0000000000000..55b217be8943f --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.aliases; + +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.indices.TestSystemIndexPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.stream.Collectors; + +import static org.elasticsearch.indices.TestSystemIndexDescriptor.INDEX_NAME; +import static org.elasticsearch.indices.TestSystemIndexDescriptor.PRIMARY_INDEX_NAME; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class SystemIndexAliasIT extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), TestSystemIndexPlugin.class); + } + + public void testCreateAliasForSystemIndex() throws Exception { + createIndex(PRIMARY_INDEX_NAME); + ensureGreen(); + assertAcked(admin().indices().prepareAliases().addAlias(PRIMARY_INDEX_NAME, INDEX_NAME + "-system-alias")); + + final GetAliasesResponse getAliasesResponse = client().admin() + .indices() + .getAliases(new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden())) + .get(); + + assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); + assertThat(getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).size(), equalTo(2)); + assertThat( + getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).stream().map(AliasMetadata::alias).collect(Collectors.toSet()), + containsInAnyOrder(".test-index", ".test-index-system-alias") + ); + getAliasesResponse.getAliases().get(PRIMARY_INDEX_NAME).forEach(alias -> assertThat(alias.isHidden(), is(true))); + + assertAcked(client().admin().indices().prepareDeleteTemplate("*").get()); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index c6a1758fd1780..465c42520faab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -444,8 +444,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception { Consumer<Exception> exceptionConsumer = t -> { Throwable cause = ExceptionsHelper.unwrapCause(t); - if (cause instanceof ClusterBlockException) { - ClusterBlockException e = (ClusterBlockException) cause; + if (cause instanceof ClusterBlockException e) { assertThat(e.blocks(), hasSize(1)); assertTrue(e.blocks().stream().allMatch(b -> b.id() == block.getBlock().id())); } else { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java index 83b0df1854400..1e0870f839fb5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java @@ -15,7 +15,7 @@ import java.io.IOException; -import static org.elasticsearch.client.Requests.indexRequest; +import static
org.elasticsearch.client.internal.Requests.indexRequest; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 9854c494be472..6ed240d935c0d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -32,8 +32,7 @@ public class RecoveryWithUnsupportedIndicesIT extends ESIntegTestCase { /** * Return settings that could be used to start a node that has the given zipped home directory. */ - private Settings prepareBackwardsDataDir(Path backwardsIndex) throws IOException { - Path indexDir = createTempDir(); + private Settings prepareBackwardsDataDir(Path indexDir, Path backwardsIndex) throws IOException { Path dataDir = indexDir.resolve("data"); try (InputStream stream = Files.newInputStream(backwardsIndex)) { TestUtil.unzip(stream, indexDir); @@ -73,8 +72,9 @@ private Settings prepareBackwardsDataDir(Path backwardsIndex) throws IOException public void testUpgradeStartClusterOn_2_4_5() throws Exception { String indexName = "unsupported-2.4.5"; + Path indexDir = createTempDir(); logger.info("Checking static index {}", indexName); - Settings nodeSettings = prepareBackwardsDataDir(getDataPath("/indices/bwc").resolve(indexName + ".zip")); + Settings nodeSettings = prepareBackwardsDataDir(indexDir, getDataPath("/indices/bwc").resolve(indexName + ".zip")); assertThat( ExceptionsHelper.unwrap( expectThrows(Exception.class, () -> internalCluster().startNode(nodeSettings)), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/client/node/NodeClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/client/internal/node/NodeClientIT.java similarity index 92% rename from server/src/internalClusterTest/java/org/elasticsearch/client/node/NodeClientIT.java rename to server/src/internalClusterTest/java/org/elasticsearch/client/internal/node/NodeClientIT.java index d1af2245e6f41..c260b873d5ad9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/client/node/NodeClientIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/client/internal/node/NodeClientIT.java @@ -5,9 +5,9 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.client.node; +package org.elasticsearch.client.internal.node; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java index 67af6205591a5..9f3a61e3bd4fb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -293,20 +293,20 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { completionFuture.onFailure(e); - throw new AssertionError(source, e); + throw new AssertionError("looping task", e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { if (keepSubmittingTasks.get()) { - clusterService.submitStateUpdateTask("looping task", this); + clusterService.submitStateUpdateTask("looping task", this, ClusterStateTaskExecutor.unbatched()); } else { completionFuture.onResponse(null); } } - }); + }, ClusterStateTaskExecutor.unbatched()); try { createIndex("index"); @@ -384,20 +384,20 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { completionFuture.onFailure(e); - throw new AssertionError(source, e); + throw new AssertionError("looping task", e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { if (keepSubmittingTasks.get()) { - clusterService.submitStateUpdateTask("looping task", this); + clusterService.submitStateUpdateTask("looping task", this, ClusterStateTaskExecutor.unbatched()); } else { completionFuture.onResponse(null); } } - }); + }, ClusterStateTaskExecutor.unbatched()); try { final ClusterHealthResponse clusterHealthResponse = client().admin() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 491e9c1f125d4..8378d90d1f307 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -143,7 +143,7 @@ public void testClusterInfoServiceCollectsInformation() { Settings.builder() .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) - .put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, indexName.equals(TEST_SYSTEM_INDEX_NAME) || randomBoolean()) .build() ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 1c1e282c9516c..6603a6ee5aee6 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.cluster; -import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.coordination.CoordinationMetadata; @@ -52,6 +50,7 @@ import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static java.util.Collections.emptyList; @@ -90,28 +89,15 @@ public void testClusterStateDiffSerialization() throws Exception { if (i > 0) { clusterState = builder.build(); } - switch (randomInt(5)) { - case 0: - builder = randomNodes(clusterState); - break; - case 1: - builder = randomRoutingTable(clusterState); - break; - case 2: - builder = randomBlocks(clusterState); - break; - case 3: - builder = randomClusterStateCustoms(clusterState); - break; - case 4: - builder = randomMetadataChanges(clusterState); - break; - case 5: - builder = randomCoordinationMetadata(clusterState); - break; - default: - throw new IllegalArgumentException("Shouldn't be here"); - } + builder = switch (randomInt(5)) { + case 0 -> randomNodes(clusterState); + case 1 -> randomRoutingTable(clusterState); + case 2 -> randomBlocks(clusterState); + case 3 -> randomClusterStateCustoms(clusterState); + case 4 -> randomMetadataChanges(clusterState); + case 5 -> randomCoordinationMetadata(clusterState); + default -> throw new IllegalArgumentException("Shouldn't be here"); + }; } clusterState = builder.incrementVersion().build(); @@ -145,9 +131,9 @@ public void testClusterStateDiffSerialization() throws Exception { assertThat(clusterStateFromDiffs.nodes().getNodes(), equalTo(clusterState.nodes().getNodes())); assertThat(clusterStateFromDiffs.nodes().getLocalNodeId(), equalTo(previousClusterStateFromDiffs.nodes().getLocalNodeId())); assertThat(clusterStateFromDiffs.nodes().getNodes(), equalTo(clusterState.nodes().getNodes())); - for (ObjectCursor<String> node : clusterStateFromDiffs.nodes().getNodes().keys()) { - DiscoveryNode node1 = clusterState.nodes().get(node.value); - DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value); + for (Map.Entry<String, DiscoveryNode> node : clusterStateFromDiffs.nodes().getNodes().entrySet()) { + DiscoveryNode node1 = clusterState.nodes().get(node.getKey()); + DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.getKey()); assertThat(node1.getVersion(), equalTo(node2.getVersion())); assertThat(node1.getAddress(), equalTo(node2.getAddress())); assertThat(node1.getAttributes(), equalTo(node2.getAttributes())); @@ -320,16 +306,16 @@ private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds */ private IndexRoutingTable randomChangeToIndexRoutingTable(IndexRoutingTable original, String[] nodes) { IndexRoutingTable.Builder builder = IndexRoutingTable.builder(original.getIndex()); - for (ObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : original.shards().values()) { + for (Map.Entry<Integer, IndexShardRoutingTable> indexShardRoutingTable : original.shards().entrySet()) { Set<String> availableNodes = Sets.newHashSet(nodes); - for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) { + for (ShardRouting shardRouting : indexShardRoutingTable.getValue().shards()) { availableNodes.remove(shardRouting.currentNodeId()); if (shardRouting.relocating()) { availableNodes.remove(shardRouting.relocatingNodeId()); } } - for (ShardRouting shardRouting :
indexShardRoutingTable.value.shards()) { + for (ShardRouting shardRouting : indexShardRoutingTable.getValue().shards()) { final ShardRouting updatedShardRouting = randomChange(shardRouting, availableNodes); availableNodes.remove(updatedShardRouting.currentNodeId()); if (shardRouting.relocating()) { @@ -367,14 +353,11 @@ private ClusterState.Builder randomBlocks(ClusterState clusterState) { * Returns a random global block */ private ClusterBlock randomGlobalBlock() { - switch (randomInt(2)) { - case 0: - return NoMasterBlockService.NO_MASTER_BLOCK_ALL; - case 1: - return NoMasterBlockService.NO_MASTER_BLOCK_WRITES; - default: - return GatewayService.STATE_NOT_RECOVERED_BLOCK; - } + return switch (randomInt(2)) { + case 0 -> NoMasterBlockService.NO_MASTER_BLOCK_ALL; + case 1 -> NoMasterBlockService.NO_MASTER_BLOCK_WRITES; + default -> GatewayService.STATE_NOT_RECOVERED_BLOCK; + }; } /** @@ -445,22 +428,13 @@ private ClusterState.Builder randomMetadataChanges(ClusterState clusterState) { Metadata metadata = clusterState.metadata(); int changesCount = randomIntBetween(1, 10); for (int i = 0; i < changesCount; i++) { - switch (randomInt(3)) { - case 0: - metadata = randomMetadataSettings(metadata); - break; - case 1: - metadata = randomIndices(metadata); - break; - case 2: - metadata = randomTemplates(metadata); - break; - case 3: - metadata = randomMetadataCustoms(metadata); - break; - default: - throw new IllegalArgumentException("Shouldn't be here"); - } + metadata = switch (randomInt(3)) { + case 0 -> randomMetadataSettings(metadata); + case 1 -> randomIndices(metadata); + case 2 -> randomTemplates(metadata); + case 3 -> randomMetadataCustoms(metadata); + default -> throw new IllegalArgumentException("Shouldn't be here"); + }; } return ClusterState.builder(clusterState).metadata(Metadata.builder(metadata).version(metadata.version() + 1).build()); } @@ -729,38 +703,35 @@ public ClusterState.Builder remove(ClusterState.Builder builder, String name) { @Override public ClusterState.Custom randomCreate(String name) { - switch (randomIntBetween(0, 1)) { - case 0: - return SnapshotsInProgress.EMPTY.withAddedEntry( - new SnapshotsInProgress.Entry( - new Snapshot(randomName("repo"), new SnapshotId(randomName("snap"), UUIDs.randomBase64UUID())), - randomBoolean(), - randomBoolean(), - SnapshotsInProgressSerializationTests.randomState(ImmutableOpenMap.of()), - Collections.emptyMap(), - Collections.emptyList(), - Collections.emptyList(), - Math.abs(randomLong()), - randomIntBetween(0, 1000), - ImmutableOpenMap.of(), - null, - SnapshotInfoTestUtils.randomUserMetadata(), - randomVersion(random()) - ) - ); - case 1: - return new RestoreInProgress.Builder().add( - new RestoreInProgress.Entry( - UUIDs.randomBase64UUID(), - new Snapshot(randomName("repo"), new SnapshotId(randomName("snap"), UUIDs.randomBase64UUID())), - RestoreInProgress.State.fromValue((byte) randomIntBetween(0, 3)), - emptyList(), - ImmutableOpenMap.of() - ) - ).build(); - default: - throw new IllegalArgumentException("Shouldn't be here"); - } + return switch (randomIntBetween(0, 1)) { + case 0 -> SnapshotsInProgress.EMPTY.withAddedEntry( + new SnapshotsInProgress.Entry( + new Snapshot(randomName("repo"), new SnapshotId(randomName("snap"), UUIDs.randomBase64UUID())), + randomBoolean(), + randomBoolean(), + SnapshotsInProgressSerializationTests.randomState(ImmutableOpenMap.of()), + Collections.emptyMap(), + Collections.emptyList(), + Collections.emptyList(), + Math.abs(randomLong()), + randomIntBetween(0, 1000), + ImmutableOpenMap.of(), + 
null, + SnapshotInfoTestUtils.randomUserMetadata(), + randomVersion(random()) + ) + ); + case 1 -> new RestoreInProgress.Builder().add( + new RestoreInProgress.Entry( + UUIDs.randomBase64UUID(), + new Snapshot(randomName("repo"), new SnapshotId(randomName("snap"), UUIDs.randomBase64UUID())), + RestoreInProgress.State.fromValue((byte) randomIntBetween(0, 3)), + emptyList(), + ImmutableOpenMap.of() + ) + ).build(); + default -> throw new IllegalArgumentException("Shouldn't be here"); + }; } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 937a978fcd21d..181d5c57221ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.Metadata; @@ -335,7 +335,7 @@ public void testCannotCommitStateThreeNodes() throws Exception { final ClusterService masterClusterService = internalCluster().clusterService(master); masterClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { latch.countDown(); } @@ -351,11 +351,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { failure.set(e); latch.countDown(); } - }); + }, ClusterStateTaskExecutor.unbatched()); logger.debug("--> waiting for cluster state to be processed/rejected"); latch.await(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index a02ca79531818..64d0a87d1a90a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -16,8 +16,8 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.coordination.NoMasterBlockService; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index b0eee30c87f6f..6f1292df8be47 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -464,11 +464,11 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { throw new AssertionError(e); } - }); + }, ClusterStateTaskExecutor.unbatched()); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index 92326c9bf9c35..9d93705637233 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Priority; @@ -22,7 +22,7 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.xcontent.XContentType; -import static org.elasticsearch.client.Requests.createIndexRequest; +import static org.elasticsearch.client.internal.Requests.createIndexRequest; import static org.elasticsearch.core.TimeValue.timeValueSeconds; import static org.elasticsearch.test.NodeRoles.dataNode; import static org.elasticsearch.test.NodeRoles.nonDataNode; @@ -31,7 +31,8 @@ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class SimpleDataNodesIT extends ESIntegTestCase { - private static final String SOURCE = "{\"type1\":{\"id\":\"1\",\"name\":\"test\"}}"; + private static final String SOURCE = """ + {"type1":{"id":"1","name":"test"}}"""; public void testIndexingBeforeAndAfterDataNodesStart() { internalCluster().startNode(nonDataNode()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index ab3190293e596..8859bf4da4cdd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -295,12 +295,22 @@ public void testAliasFilterValidation() { logger.info("--> start data node / non master node"); internalCluster().startDataOnlyNode(); - assertAcked( - prepareCreate("test").setMapping( - "{\"properties\" : {\"table_a\" : { \"type\" : \"nested\", " - + "\"properties\" : {\"field_a\" : { \"type\" : \"keyword\" },\"field_b\" :{ 
\"type\" : \"keyword\" }}}}}" - ) - ); + assertAcked(prepareCreate("test").setMapping(""" + { + "properties": { + "table_a": { + "type": "nested", + "properties": { + "field_a": { + "type": "keyword" + }, + "field_b": { + "type": "keyword" + } + } + } + } + }""")); client().admin() .indices() .prepareAliases() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index c8f65d6397d64..54db6abab91dd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -241,7 +240,7 @@ public void testDisablingAllocationFiltering() { public void testInvalidIPFilterClusterSettings() { String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip"); - Setting filterSetting = randomFrom( + var filterSetting = randomFrom( FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 709ea127bb7a1..d588ab2047ff6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -107,8 +108,8 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) {} - }); + public void onFailure(Exception e) {} + }, ClusterStateTaskExecutor.unbatched()); ensureGreen(index); // remove the extra node clusterService.submitStateUpdateTask("test-remove-injected-node", new ClusterStateUpdateTask() { @@ -122,8 +123,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) {} - }); + public void onFailure(Exception e) {} + }, ClusterStateTaskExecutor.unbatched()); } private ActionFuture executeAndCancelCommittedPublication( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index fbe690a89f2b4..f0278e6636052 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -8,38 +8,21 @@ package org.elasticsearch.cluster.coordination; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.TestCustomMetadata; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import java.util.EnumSet; -import java.util.concurrent.TimeUnit; - import static org.elasticsearch.test.NodeRoles.dataNode; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -80,64 +63,6 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); } - public void testHandleNodeJoin_incompatibleClusterState() { - String masterNode = internalCluster().startMasterOnlyNode(); - String node1 = internalCluster().startNode(); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1); - final ClusterState state = clusterService.state(); - Metadata.Builder mdBuilder = Metadata.builder(state.metadata()); - mdBuilder.putCustom(CustomMetadata.TYPE, new CustomMetadata("data")); - ClusterState stateWithCustomMetadata = ClusterState.builder(state).metadata(mdBuilder).build(); - - final PlainActionFuture future = new PlainActionFuture<>(); - - internalCluster().getInstance(TransportService.class, masterNode) - .sendRequest( - state.nodes().getLocalNode(), - JoinHelper.JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(stateWithCustomMetadata), - new ActionListenerResponseHandler<>(new ActionListener<>() { - @Override - public void onResponse(TransportResponse.Empty unused) { - fail("onResponse should not be called"); - } - - @Override - public void onFailure(Exception t) { - assertThat(t, instanceOf(RemoteTransportException.class)); - assertThat(t.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(t.getCause().getMessage(), containsString("Unknown NamedWriteable")); - future.onResponse(null); - } - }, i -> TransportResponse.Empty.INSTANCE, ThreadPool.Names.GENERIC) 
- ); - - future.actionGet(10, TimeUnit.SECONDS); - } - - public static class CustomMetadata extends TestCustomMetadata { - public static final String TYPE = "custom_md"; - - CustomMetadata(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY, Metadata.XContentContext.SNAPSHOT); - } - } - public void testDiscoveryStats() throws Exception { internalCluster().startNode(); ensureGreen(); // ensures that all events are processed (in particular state recovery fully completed) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index b2f037b8984f3..ee11668388e2d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -168,22 +168,20 @@ public void testTemplateUpdate() throws Exception { for (int i = 0; i < 3; i++) { IndexTemplateMetadata templateMetadata = templates.get(i); switch (templateMetadata.getName()) { - case "test_added_template": + case "test_added_template" -> { assertFalse(addedFound); addedFound = true; - break; - case "test_changed_template": + } + case "test_changed_template" -> { assertFalse(changedFound); changedFound = true; assertThat(templateMetadata.getOrder(), equalTo(10)); - break; - case "test_dummy_template": + } + case "test_dummy_template" -> { assertFalse(dummyFound); dummyFound = true; - break; - default: - fail("unexpected template " + templateMetadata.getName()); - break; + } + default -> fail("unexpected template " + templateMetadata.getName()); } } assertTrue(addedFound); @@ -221,18 +219,16 @@ private void assertTemplates() throws Exception { for (int i = 0; i < 2; i++) { IndexTemplateMetadata templateMetadata = templates.get(i); switch (templateMetadata.getName()) { - case "test_added_template": + case "test_added_template" -> { assertFalse(addedFound); addedFound = true; - break; - case "test_changed_template": + } + case "test_changed_template" -> { assertFalse(changedFound); changedFound = true; assertThat(templateMetadata.getOrder(), equalTo(10)); - break; - default: - fail("unexpected template " + templateMetadata.getName()); - break; + } + default -> fail("unexpected template " + templateMetadata.getName()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index 954b9e4758135..d6b1d7456eef8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation; 
import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index f733b1b742bb8..693efc66aadbd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.cluster.routing; -import com.carrotsearch.hppc.cursors.IntObjectCursor; - import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; @@ -50,6 +48,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -296,9 +295,9 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { .getStoreStatuses() .get(idxName); ClusterRerouteRequestBuilder rerouteBuilder = client().admin().cluster().prepareReroute(); - for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses) { - int shardId = shardStoreStatuses.key; - IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.value); + for (Map.Entry<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses.entrySet()) { + int shardId = shardStoreStatuses.getKey(); + IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.getValue()); logger.info("--> adding allocation command for shard {}", shardId); // force allocation based on node id if (useStaleReplica) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java new file mode 100644 index 0000000000000..b3a7ecdea6ece --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.DiskUsageIntegTestCase; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Locale; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING; +import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class DiskThresholdMonitorIT extends DiskUsageIntegTestCase { + + private static final long FLOODSTAGE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes(); + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES * 2 + "b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES * 2 + "b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES + "b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") + .build(); + } + + public void testFloodStageExceeded() throws Exception { + internalCluster().startMasterOnlyNode(); + final String dataNodeName = internalCluster().startDataOnlyNode(); + + final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms") + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeName) + .build() + ); + // ensure we have a system index on the data node too. 
+ assertAcked(client().admin().indices().prepareCreate(TaskResultsService.TASK_INDEX)); + + getTestFileStore(dataNodeName).setTotalSpace(1L); + refreshClusterInfo(); + assertBusy(() -> { + assertBlocked( + client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"), + IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK + ); + assertThat( + client().admin() + .indices() + .prepareGetSettings(indexName) + .setNames(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE) + .get() + .getSetting(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), + equalTo("true") + ); + }); + + // Verify that we can adjust things like allocation filters even while blocked + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(indexName) + .setSettings( + Settings.builder().putNull(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()) + ) + ); + + // Verify that we can still move shards around even while blocked + final String newDataNodeName = internalCluster().startDataOnlyNode(); + final String newDataNodeId = client().admin().cluster().prepareNodesInfo(newDataNodeName).get().getNodes().get(0).getNode().getId(); + assertBusy(() -> { + final ShardRouting primaryShard = client().admin() + .cluster() + .prepareState() + .clear() + .setRoutingTable(true) + .setNodes(true) + .setIndices(indexName) + .get() + .getState() + .routingTable() + .index(indexName) + .shard(0) + .primaryShard(); + assertThat(primaryShard.state(), equalTo(ShardRoutingState.STARTED)); + assertThat(primaryShard.currentNodeId(), equalTo(newDataNodeId)); + }); + + // Verify that the block is removed once the shard migration is complete + refreshClusterInfo(); + assertFalse(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertNull( + client().admin() + .indices() + .prepareGetSettings(indexName) + .setNames(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE) + .get() + .getSetting(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE) + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index f1dc9f3b60859..501089910a479 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -433,6 +433,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%") .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "90%") .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "100%") + .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index eeb9cbbe5b805..581022ed5b6f8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -7,10 +7,10 @@ */ package org.elasticsearch.cluster.service; -import org.apache.logging.log4j.message.ParameterizedMessage; import 
org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; @@ -68,7 +68,7 @@ public void onAckTimeout() { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { processedLatch.countDown(); } @@ -79,12 +79,13 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + public void onFailure(Exception e) { + logger.error("failed to execute callback in test", e); onFailure.set(true); latch.countDown(); } - } + }, + ClusterStateTaskExecutor.unbatched() ); ensureGreen(); @@ -124,7 +125,7 @@ public void onAckTimeout() { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { processedLatch.countDown(); } @@ -135,12 +136,13 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + public void onFailure(Exception e) { + logger.error("failed to execute callback in test", e); onFailure.set(true); latch.countDown(); } - } + }, + ClusterStateTaskExecutor.unbatched() ); ensureGreen(); @@ -185,7 +187,7 @@ public void onAckTimeout() { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {} + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {} @Override public ClusterState execute(ClusterState currentState) throws Exception { @@ -194,12 +196,13 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + public void onFailure(Exception e) { + logger.error("failed to execute callback in test", e); onFailure.set(true); latch.countDown(); } - } + }, + ClusterStateTaskExecutor.unbatched() ); ensureGreen(); @@ -242,7 +245,7 @@ public void onAckTimeout() { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { processedLatch.countDown(); } @@ -253,12 +256,13 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + public void onFailure(Exception e) { + logger.error("failed to execute callback in test", e); onFailure.set(true); latch.countDown(); } - } + }, + ClusterStateTaskExecutor.unbatched() ); ensureGreen(); @@ -292,11 +296,11 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, 
Exception e) { + public void onFailure(Exception e) { invoked1.countDown(); fail(); } - }); + }, ClusterStateTaskExecutor.unbatched()); invoked1.await(); final CountDownLatch invoked2 = new CountDownLatch(9); for (int i = 2; i <= 10; i++) { @@ -307,15 +311,15 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { fail(); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { invoked2.countDown(); } - }); + }, ClusterStateTaskExecutor.unbatched()); } // there might be other tasks in this node, make sure to only take the ones we add into account in this test @@ -362,11 +366,11 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { invoked3.countDown(); fail(); } - }); + }, ClusterStateTaskExecutor.unbatched()); invoked3.await(); for (int i = 2; i <= 5; i++) { @@ -377,10 +381,10 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { fail(); } - }); + }, ClusterStateTaskExecutor.unbatched()); } Thread.sleep(100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java index e71990ca8cda6..55fe9661582f1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java @@ -31,13 +31,11 @@ public class ClusterSearchShardsIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - switch (nodeOrdinal % 2) { - case 1: - return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).put("node.attr.tag", "B").build(); - case 0: - return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).put("node.attr.tag", "A").build(); - } - return super.nodeSettings(nodeOrdinal, otherSettings); + return switch (nodeOrdinal % 2) { + case 1 -> Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).put("node.attr.tag", "B").build(); + case 0 -> Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).put("node.attr.tag", "A").build(); + default -> super.nodeSettings(nodeOrdinal, otherSettings); + }; } public void testSingleShardAllocation() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index f7e8ffa66bb9d..fd4bf7521860a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; 
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Priority; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 49633e9d8019e..048e93245bf37 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.coordination.ClusterBootstrapService; @@ -231,8 +231,8 @@ public void testAckedIndexing() throws Exception { } // in case of a bridge partition, shard allocation can fail "index.allocation.max_retries" times if the master // is the super-connected node and recovery source and target are on opposite sides of the bridge - if (disruptionScheme instanceof NetworkDisruption - && ((NetworkDisruption) disruptionScheme).getDisruptedLinks() instanceof Bridge) { + if (disruptionScheme instanceof NetworkDisruption networkDisruption + && networkDisruption.getDisruptedLinks() instanceof Bridge) { assertBusy(() -> assertAcked(client().admin().cluster().prepareReroute().setRetryFailed(true))); } ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 82b0a9171f479..492a65d6f37b7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -142,19 +142,12 @@ public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception { assertEquals("different meta data version", state.metadata().version(), nodeState.metadata().version()); assertEquals("different routing", state.routingTable().toString(), nodeState.routingTable().toString()); } catch (AssertionError t) { - fail( - "failed comparing cluster state: " - + t.getMessage() - + "\n" - + "--- cluster state of node [" - + nodes.get(0) - + "]: ---\n" - + state - + "\n--- cluster state [" - + node - + "]: ---\n" - + nodeState - ); + fail(""" + failed comparing cluster state: %s + --- cluster state of node [%s]: --- + %s + --- cluster state [%s]: --- + %s""".formatted(t.getMessage(), nodes.get(0), state, node, nodeState)); } } @@ -209,12 +202,9 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { success = false; } if (success == false) { - fail( - "node [" - + node - + "] has no master or has blocks, despite of being on the right side of the partition. State dump:\n" - + nodeState - ); + fail(""" + node [%s] has no master or has blocks, despite of being on the right side of the partition. 
State dump: + %s""".formatted(node, nodeState)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index be9301733078b..eafa65ebbe1fa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -8,9 +8,9 @@ package org.elasticsearch.discovery; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; @@ -241,10 +241,10 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); + public void onFailure(Exception e) { + logger.warn("failure [sneaky-update]", e); } - }); + }, ClusterStateTaskExecutor.unbatched()); // Save the new elected master node final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java index 8f150f324e191..19d091714622a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; -import static org.elasticsearch.client.Requests.createIndexRequest; +import static org.elasticsearch.client.internal.Requests.createIndexRequest; public class AliasedIndexDocumentActionsIT extends DocumentActionsIT { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java index 27010e5f94acb..81d65a0ed2eb6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java @@ -29,10 +29,10 @@ import java.io.IOException; import static org.elasticsearch.action.DocWriteRequest.OpType; -import static org.elasticsearch.client.Requests.clearIndicesCacheRequest; -import static org.elasticsearch.client.Requests.getRequest; -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.refreshRequest; +import static org.elasticsearch.client.internal.Requests.clearIndicesCacheRequest; +import static org.elasticsearch.client.internal.Requests.getRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.refreshRequest; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java index 6051f3c891e4e..14345443f00e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -136,7 +136,16 @@ public void testFailsToStartIfUpgradedTooFar() { ); assertThat( illegalStateException.getMessage(), - allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]")) + allOf( + startsWith("cannot upgrade a node from version ["), + endsWith( + "] directly to version [" + + Version.CURRENT + + "], upgrade to version [" + + Version.CURRENT.minimumCompatibilityVersion() + + "] first." + ) + ) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 3e39adbe487ff..cff7610d52097 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -16,8 +16,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetadata; import org.elasticsearch.cluster.metadata.IndexGraveyard; @@ -466,18 +466,15 @@ public void testRecoverMissingAnalyzer() throws Exception { internalCluster().startNode(); prepareCreate("test").setSettings( Settings.builder().put("index.analysis.analyzer.test.tokenizer", "standard").put("index.number_of_shards", "1") - ) - .setMapping( - "{\n" - + " \"properties\": {\n" - + " \"field1\": {\n" - + " \"type\": \"text\",\n" - + " \"analyzer\": \"test\"\n" - + " }\n" - + " }\n" - + " }}" - ) - .get(); + ).setMapping(""" + { + "properties": { + "field1": { + "type": "text", + "analyzer": "test" + } + } + }""").get(); logger.info("--> indexing a simple document"); client().prepareIndex("test").setId("1").setSource("field1", "value one").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); @@ -611,7 +608,7 @@ public void testHalfDeletedIndexImport() throws Exception { .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) .build() ); - NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, Version.CURRENT), paths); + NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, Version.CURRENT, metadata.oldestIndexVersion()), paths); }); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java index acb66ecb6763b..6a5fa315f5642 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.gateway; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; 
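// A small before/after sketch (illustrative, not taken from the diff above) of the Java
// text-block conversion applied throughout these test hunks, using the GatewayIndexStateIT
// mapping as the example payload; the variable names are assumptions.
String concatenated = "{\n"
    + "  \"properties\": {\n"
    + "    \"field1\": {\n"
    + "      \"type\": \"text\",\n"
    + "      \"analyzer\": \"test\"\n"
    + "    }\n"
    + "  }\n"
    + "}";
String textBlock = """
    {
      "properties": {
        "field1": {
          "type": "text",
          "analyzer": "test"
        }
      }
    }""";
// Both carry the same JSON (modulo incidental indentation, which the text block strips);
// the text block drops the per-line quoting, escaping, and concatenation.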
-import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -18,7 +18,7 @@ import java.util.concurrent.TimeUnit; -import static org.elasticsearch.client.Requests.clusterHealthRequest; +import static org.elasticsearch.client.internal.Requests.clusterHealthRequest; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index 1b7c4ce47aa22..ec87a0d1fa2fa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.gateway; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index b74420e874dc7..3728eb90a7793 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -669,31 +669,33 @@ public void testGetFieldsComplexField() throws Exception { } public void testUngeneratedFieldsThatAreNeverStored() throws IOException { - String createIndexSource = "{\n" - + " \"settings\": {\n" - + " \"index.translog.flush_threshold_size\": \"1pb\",\n" - + " \"refresh_interval\": \"-1\"\n" - + " },\n" - + " \"mappings\": {\n" - + " \"_doc\": {\n" - + " \"properties\": {\n" - + " \"suggest\": {\n" - + " \"type\": \"completion\"\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + "}"; + String createIndexSource = """ + { + "settings": { + "index.translog.flush_threshold_size": "1pb", + "refresh_interval": "-1" + }, + "mappings": { + "_doc": { + "properties": { + "suggest": { + "type": "completion" + } + } + } + } + }"""; assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); - String doc = "{\n" - + " \"suggest\": {\n" - + " \"input\": [\n" - + " \"Nevermind\",\n" - + " \"Nirvana\"\n" - + " ]\n" - + " }\n" - + "}"; + String doc = """ + { + "suggest": { + "input": [ + "Nevermind", + "Nirvana" + ] + } + }"""; index("test", "1", doc); String[] fieldsList = { "suggest" }; @@ -708,12 +710,13 @@ public void testUngeneratedFieldsThatAreNeverStored() throws IOException { } public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException { - String createIndexSource = "{\n" - + " \"settings\": {\n" - + " \"index.translog.flush_threshold_size\": \"1pb\",\n" - + " \"refresh_interval\": \"-1\"\n" - + " }\n" - + "}"; + String createIndexSource = """ + { + "settings": { + "index.translog.flush_threshold_size": "1pb", + "refresh_interval": "-1" + } + }"""; assertAcked(prepareCreate("test").addAlias(new 
Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); @@ -731,16 +734,21 @@ public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException { } public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException { - String createIndexSource = "{\n" - + " \"settings\": {\n" - + " \"index.translog.flush_threshold_size\": \"1pb\",\n" - + " \"refresh_interval\": \"-1\"\n" - + " }\n" - + "}"; + String createIndexSource = """ + { + "settings": { + "index.translog.flush_threshold_size": "1pb", + "refresh_interval": "-1" + } + }"""; assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); - String doc = "{\n" + " \"text\": \"some text.\"\n" + "}\n"; + String doc = """ + { + "text": "some text." + } + """; client().prepareIndex("test").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog @@ -781,37 +789,39 @@ public void testGeneratedStringFieldsStored() throws IOException { void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) { String storedString = stored ? "true" : "false"; - String createIndexSource = "{\n" - + " \"settings\": {\n" - + " \"index.translog.flush_threshold_size\": \"1pb\",\n" - + " \"refresh_interval\": \"-1\"\n" - + " },\n" - + " \"mappings\": {\n" - + " \"_doc\": {\n" - + " \"_source\" : {\"enabled\" : " - + sourceEnabled - + "}," - + " \"properties\": {\n" - + " \"text1\": {\n" - + " \"type\": \"text\",\n" - + " \"store\": \"" - + storedString - + "\"" - + " },\n" - + " \"text2\": {\n" - + " \"type\": \"text\",\n" - + " \"store\": \"" - + storedString - + "\"" - + " }" - + " }\n" - + " }\n" - + " }\n" - + "}"; + String createIndexSource = """ + { + "settings": { + "index.translog.flush_threshold_size": "1pb", + "refresh_interval": "-1" + }, + "mappings": { + "_doc": { + "_source": { + "enabled": %s + }, + "properties": { + "text1": { + "type": "text", + "store": "%s" + }, + "text2": { + "type": "text", + "store": "%s" + } + } + } + } + }""".formatted(sourceEnabled, storedString, storedString); assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); - String doc = "{\n" + " \"text1\": \"some text.\"\n," + " \"text2\": \"more text.\"\n" + "}\n"; + String doc = """ + { + "text1": "some text." + , "text2": "more text." 
+ } + """; index("test", "1", doc); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index 1340966e63ada..8ab460fe9fa1b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; @@ -79,7 +79,8 @@ public void testFinalPipelineCantChangeDestination() { final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); - final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); + final BytesReference finalPipelineBody = new BytesArray(""" + {"processors": [{"changing_dest": {}}]}"""); client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final IllegalStateException e = expectThrows( @@ -96,13 +97,15 @@ public void testFinalPipelineOfOldDestinationIsNotInvoked() { .build(); createIndex("index", settings); - BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); + BytesReference defaultPipelineBody = new BytesArray(""" + {"processors": [{"changing_dest": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) .actionGet(); - BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"no_such_field\"}}]}"); + BytesReference finalPipelineBody = new BytesArray(""" + {"processors": [{"final": {"exists":"no_such_field"}}]}"""); client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); IndexResponse indexResponse = client().prepareIndex("index") @@ -123,13 +126,15 @@ public void testFinalPipelineOfNewDestinationIsInvoked() { settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("target", settings); - BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); + BytesReference defaultPipelineBody = new BytesArray(""" + {"processors": [{"changing_dest": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) .actionGet(); - BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {}}]}"); + BytesReference finalPipelineBody = new BytesArray(""" + {"processors": [{"final": {}}]}"""); client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); IndexResponse indexResponse = client().prepareIndex("index") @@ -150,13 +155,15 @@ public void testDefaultPipelineOfNewDestinationIsNotInvoked() { settings = Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "target_default_pipeline").build(); 
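// A hedged sketch (illustrative, not taken from the diff above) of the pipeline-registration
// pattern that the FinalPipelineIT hunks reformat into text blocks. The pipeline id
// "sketch_pipeline" and the empty processor list are assumptions made for illustration.
BytesReference sketchPipelineBody = new BytesArray("""
    {"processors": []}""");
client().admin()
    .cluster()
    .putPipeline(new PutPipelineRequest("sketch_pipeline", sketchPipelineBody, XContentType.JSON))
    .actionGet();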
createIndex("target", settings); - BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); + BytesReference defaultPipelineBody = new BytesArray(""" + {"processors": [{"changing_dest": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) .actionGet(); - BytesReference targetPipeline = new BytesArray("{\"processors\": [{\"final\": {}}]}"); + BytesReference targetPipeline = new BytesArray(""" + {"processors": [{"final": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)) @@ -186,12 +193,14 @@ public void testFinalPipeline() { } public void testRequestPipelineAndFinalPipeline() { - final BytesReference requestPipelineBody = new BytesArray("{\"processors\": [{\"request\": {}}]}"); + final BytesReference requestPipelineBody = new BytesArray(""" + {"processors": [{"request": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("request_pipeline", requestPipelineBody, XContentType.JSON)) .actionGet(); - final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"request\"}}]}"); + final BytesReference finalPipelineBody = new BytesArray(""" + {"processors": [{"final": {"exists":"request"}}]}"""); client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); @@ -212,12 +221,14 @@ public void testRequestPipelineAndFinalPipeline() { } public void testDefaultAndFinalPipeline() { - final BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"default\": {}}]}"); + final BytesReference defaultPipelineBody = new BytesArray(""" + {"processors": [{"default": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) .actionGet(); - final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"default\"}}]}"); + final BytesReference finalPipelineBody = new BytesArray(""" + {"processors": [{"final": {"exists":"default"}}]}"""); client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final Settings settings = Settings.builder() .put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline") @@ -240,12 +251,14 @@ public void testDefaultAndFinalPipeline() { } public void testDefaultAndFinalPipelineFromTemplates() { - final BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"default\": {}}]}"); + final BytesReference defaultPipelineBody = new BytesArray(""" + {"processors": [{"default": {}}]}"""); client().admin() .cluster() .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) .actionGet(); - final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"default\"}}]}"); + final BytesReference finalPipelineBody = new BytesArray(""" + {"processors": [{"final": {"exists":"default"}}]}"""); client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final int lowOrder = randomIntBetween(0, Integer.MAX_VALUE - 1); final int highOrder = 
randomIntBetween(lowOrder + 1, Integer.MAX_VALUE); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java index d4ad10d0ea923..c6dca12790036 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.index; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java index 31a727d1c1bc0..20a420ebe7cd6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java index b695d97cafda0..346882f2ae639 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index 065754cae124c..0f0588d83fe6b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -156,15 +156,7 @@ public void testMaxDocsLimitConcurrently() throws Exception { assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) totalSuccess)); } - static final class IndexingResult { - final int numSuccess; - final int numFailures; - - IndexingResult(int numSuccess, int numFailures) { - this.numSuccess = numSuccess; - this.numFailures = numFailures; - } - } + record IndexingResult(int numSuccess, int numFailures) {} static IndexingResult indexDocs(int numRequests, int numThreads) throws Exception { final AtomicInteger completedRequests = new AtomicInteger(); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 38b0bed97457d..f6ee9c9eac607 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.service.ClusterService; @@ -155,10 +156,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { throw new AssertionError("unexpected", e); } - }); + }, ClusterStateTaskExecutor.unbatched()); masterBlockedLatch.await(); final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index") @@ -192,10 +193,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { throw new AssertionError("unexpected", e); } - }); + }, ClusterStateTaskExecutor.unbatched()); masterBlockedLatch.await(); final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index").setId("2").setSource("field2", "value2"); @@ -348,14 +349,16 @@ public void testBulkRequestWithNotFoundDynamicTemplate() throws Exception { } public void testDynamicRuntimeNoConflicts() { - assertAcked(client().admin().indices().prepareCreate("test").setMapping("{\"_doc\":{\"dynamic\":\"runtime\"}}").get()); + assertAcked(client().admin().indices().prepareCreate("test").setMapping(""" + {"_doc":{"dynamic":"runtime"}}""").get()); List docs = new ArrayList<>(); // the root is mapped dynamic:runtime hence there are no type conflicts docs.add(new IndexRequest("test").source("one.two.three", new int[] { 1, 2, 3 })); docs.add(new IndexRequest("test").source("one.two", 3.5)); docs.add(new IndexRequest("test").source("one", "one")); - docs.add(new IndexRequest("test").source("{\"one\":{\"two\": { \"three\": \"three\"}}}", XContentType.JSON)); + docs.add(new IndexRequest("test").source(""" + {"one":{"two": { "three": "three"}}}""", XContentType.JSON)); Collections.shuffle(docs, random()); BulkRequest bulkRequest = new BulkRequest(); for (IndexRequest doc : docs) { @@ -380,16 +383,21 @@ public void testDynamicRuntimeNoConflicts() { } public void testDynamicRuntimeObjectFields() { - assertAcked( - client().admin() - .indices() - .prepareCreate("test") - .setMapping( - "{\"_doc\":{\"properties\":{" - + "\"obj\":{\"properties\":{\"runtime\":{\"type\":\"object\",\"dynamic\":\"runtime\"}}}}}}" - ) - .get() - ); + assertAcked(client().admin().indices().prepareCreate("test").setMapping(""" + { + "_doc": { + "properties": { + "obj": { + "properties": { + "runtime": { + "type": "object", + "dynamic": "runtime" + } + } + } + } + } + }""").get()); List docs = new ArrayList<>(); docs.add(new IndexRequest("test").source("obj.one", 1)); @@ -397,7 +405,8 @@ public void testDynamicRuntimeObjectFields() { // obj.runtime is mapped dynamic:runtime hence there are no type conflicts docs.add(new 
IndexRequest("test").source("obj.runtime.one.two", "test")); docs.add(new IndexRequest("test").source("obj.runtime.one", "one")); - docs.add(new IndexRequest("test").source("{\"obj\":{\"runtime\":{\"one\":{\"two\": 1}}}}", XContentType.JSON)); + docs.add(new IndexRequest("test").source(""" + {"obj":{"runtime":{"one":{"two": 1}}}}""", XContentType.JSON)); Collections.shuffle(docs, random()); BulkRequest bulkRequest = new BulkRequest(); for (IndexRequest doc : docs) { @@ -435,22 +444,34 @@ public void testDynamicRuntimeObjectFields() { exception.getMessage() ); - assertAcked( - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - "{\"_doc\":{\"properties\":{\"obj\":{\"properties\":" - + "{\"runtime\":{\"properties\":{\"dynamic\":{\"type\":\"object\", \"dynamic\":true}}}}}}}}", - XContentType.JSON - ) - ); + assertAcked(client().admin().indices().preparePutMapping("test").setSource(""" + { + "_doc": { + "properties": { + "obj": { + "properties": { + "runtime": { + "properties": { + "dynamic": { + "type": "object", + "dynamic": true + } + } + } + } + } + } + } + }""", XContentType.JSON)); // the parent object has been mapped dynamic:true, hence the field gets indexed + // we use a fixed doc id here to make sure this document and the one we sent later with a conflicting type + // target the same shard where we are sure the mapping update has been applied assertEquals( RestStatus.CREATED, client().prepareIndex("test") .setSource("obj.runtime.dynamic.number", 1) + .setId("id") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get() .status() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 2dcdb3e580914..51724955590a4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.seqno; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index 15a8a6de792d9..01645b7eb2165 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -10,7 +10,6 @@ import joptsimple.OptionParser; import joptsimple.OptionSet; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.index.IndexWriter; @@ -208,10 +207,10 @@ public Settings onNodeStopped(String nodeName) throws Exception { String nodeId = null; final ClusterState state = client().admin().cluster().prepareState().get().getState(); final DiscoveryNodes nodes = state.nodes(); - for (ObjectObjectCursor cursor : nodes.getNodes()) { - final String name = cursor.value.getName(); + for (Map.Entry cursor : nodes.getNodes().entrySet()) { + final String name = cursor.getValue().getName(); if (name.equals(node)) { - 
nodeId = cursor.key; + nodeId = cursor.getKey(); break; } } @@ -396,10 +395,10 @@ public Settings onNodeStopped(String nodeName) throws Exception { String primaryNodeId = null; final ClusterState state = client().admin().cluster().prepareState().get().getState(); final DiscoveryNodes nodes = state.nodes(); - for (ObjectObjectCursor cursor : nodes.getNodes()) { - final String name = cursor.value.getName(); + for (Map.Entry cursor : nodes.getNodes().entrySet()) { + final String name = cursor.getValue().getName(); if (name.equals(node1)) { - primaryNodeId = cursor.key; + primaryNodeId = cursor.getKey(); break; } } @@ -603,8 +602,8 @@ public void testResolvePath() throws Exception { final Map nodeNameToNodeId = new HashMap<>(); final ClusterState state = client().admin().cluster().prepareState().get().getState(); final DiscoveryNodes nodes = state.nodes(); - for (ObjectObjectCursor cursor : nodes.getNodes()) { - nodeNameToNodeId.put(cursor.value.getName(), cursor.key); + for (Map.Entry cursor : nodes.getNodes().entrySet()) { + nodeNameToNodeId.put(cursor.getValue().getName(), cursor.getKey()); } final GroupShardsIterator shardIterators = state.getRoutingTable() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index d2e2acaccf30d..2042c54542307 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.index.store; -import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.index.CheckIndex; @@ -24,7 +23,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -78,6 +77,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.CopyOnWriteArrayList; @@ -636,9 +636,11 @@ public void testReplicaCorruption() throws Exception { final IndicesShardStoresResponse stores = client().admin().indices().prepareShardStores(index.getName()).get(); - for (IntObjectCursor> shards : stores.getStoreStatuses().get(index.getName())) { - for (IndicesShardStoresResponse.StoreStatus store : shards.value) { - final ShardId shardId = new ShardId(index, shards.key); + for (Map.Entry> shards : stores.getStoreStatuses() + .get(index.getName()) + .entrySet()) { + for (IndicesShardStoresResponse.StoreStatus store : shards.getValue()) { + final ShardId shardId = new ShardId(index, shards.getKey()); if (store.getAllocationStatus().equals(IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED)) { for (Path path : findFilesToCorruptOnNode(store.getNode().getName(), shardId)) { try (OutputStream os = Files.newOutputStream(path)) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java index e17265c57513b..b63b651a20d9d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.plugins.Plugin; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 6fa7f7e79309c..59346a5f4eebe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -27,8 +27,8 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.client.Requests.createIndexRequest; +import static org.elasticsearch.client.internal.Requests.clusterHealthRequest; +import static org.elasticsearch.client.internal.Requests.createIndexRequest; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index ab301f132591e..b999487a1ce5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -35,6 +35,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -244,16 +245,11 @@ private static void assertShardStatesMatch( try { assertBusy(waitPredicate, 1, TimeUnit.MINUTES); } catch (AssertionError ae) { - fail( - "failed to observe expect shard states\n" - + "expected: [" - + numShards - + "] shards with states: " - + Strings.arrayToCommaDelimitedString(shardStates) - + "\n" - + "observed:\n" - + stateChangeListener - ); + fail(String.format(Locale.ROOT, """ + failed to observe expect shard states + expected: [%d] shards with states: %s + observed: + %s""", numShards, Strings.arrayToCommaDelimitedString(shardStates), stateChangeListener)); } stateChangeListener.shardStates.clear(); @@ -272,12 +268,12 @@ public void indexShardStateChanged( IndexShardState newState, @Nullable String reason ) { - List shardStates = this.shardStates.putIfAbsent( + List shardStateList = this.shardStates.putIfAbsent( indexShard.shardId(), new CopyOnWriteArrayList<>(new IndexShardState[] { newState }) ); - if (shardStates != null) { - shardStates.add(newState); + if (shardStateList != 
null) { + shardStateList.add(newState); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index c55487e37accd..383e4613d4d6a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -700,8 +700,8 @@ private static void verify(ActionRequestBuilder requestBuilder, boolean fa private static void verify(ActionRequestBuilder requestBuilder, boolean fail, long expectedCount) { if (fail) { - if (requestBuilder instanceof MultiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); + if (requestBuilder instanceof MultiSearchRequestBuilder multiSearchRequestBuilder) { + MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); assertThat(multiSearchResponse.getResponses().length, equalTo(1)); assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue()); @@ -712,11 +712,10 @@ private static void verify(ActionRequestBuilder requestBuilder, boolean fa } catch (IndexNotFoundException | IndexClosedException e) {} } } else { - if (requestBuilder instanceof SearchRequestBuilder) { - SearchRequestBuilder searchRequestBuilder = (SearchRequestBuilder) requestBuilder; + if (requestBuilder instanceof SearchRequestBuilder searchRequestBuilder) { assertHitCount(searchRequestBuilder.get(), expectedCount); - } else if (requestBuilder instanceof MultiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); + } else if (requestBuilder instanceof MultiSearchRequestBuilder multiSearchRequestBuilder) { + MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); assertThat(multiSearchResponse.getResponses().length, equalTo(1)); assertThat(multiSearchResponse.getResponses()[0].getResponse(), notNullValue()); } else { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index a969e4b2d1536..5bffa2106dd13 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexManagerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexManagerIT.java index f85dd96fa8bd4..a9b3058770142 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexManagerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexManagerIT.java @@ -13,6 +13,8 @@ import 
org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -34,6 +36,7 @@ import static org.elasticsearch.test.XContentTestUtils.convertToXContent; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SystemIndexManagerIT extends ESIntegTestCase { @@ -94,6 +97,30 @@ public void testSystemIndexManagerLeavesNewerMappingsAlone() throws Exception { assertBusy(() -> assertMappingsAndSettings(TestSystemIndexDescriptor.getNewMappings())); } + /** + * Ensures that we can clear any blocks that get set on managed system indices. + * + * See https://github.com/elastic/elasticsearch/issues/80814 + */ + public void testBlocksCanBeClearedFromManagedSystemIndices() throws Exception { + internalCluster().startNodes(1); + + // Trigger the creation of the system index + assertAcked(prepareCreate(INDEX_NAME)); + ensureGreen(INDEX_NAME); + + for (IndexMetadata.APIBlock blockType : IndexMetadata.APIBlock.values()) { + enableIndexBlock(INDEX_NAME, blockType.settingName()); + + AcknowledgedResponse removeBlockResp = client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(blockType.settingName(), false)) + .get(); + assertThat(removeBlockResp.isAcknowledged(), is(true)); + } + } + /** * Performs a cluster state update in order to trigger any cluster state listeners - specifically, SystemIndexManager. 
*/ diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java index e84ba41f80ac2..b272da9e5c813 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java @@ -28,16 +28,21 @@ public class ConcurrentDynamicTemplateIT extends ESIntegTestCase { // see #3544 public void testConcurrentDynamicMapping() throws Exception { final String fieldName = "field"; - final String mapping = "{" - + "\"dynamic_templates\": [" - + "{ \"" - + fieldName - + "\": {" - + "\"path_match\": \"*\"," - + "\"mapping\": {" - + "\"type\": \"text\"," - + "\"store\": true," - + "\"analyzer\": \"whitespace\" } } } ] }"; + final String mapping = """ + { + "dynamic_templates": [ + { + "%s": { + "path_match": "*", + "mapping": { + "type": "text", + "store": true, + "analyzer": "whitespace" + } + } + } + ] + }""".formatted(fieldName); // The 'fieldNames' array is used to help with retrieval of index terms // after testing diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java index 276c6790faed2..ec2c9332db4c6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java @@ -33,19 +33,21 @@ protected boolean forbidPrivateIndexSettings() { * to be able to index new documents into them. Indexing should issue a deprecation warning though. 
*/ public void testBWCMalformedDynamicTemplate() { - String mapping = "{ \"dynamic_templates\": [\n" - + " {\n" - + " \"my_template\": {\n" - + " \"mapping\": {\n" - + " \"ignore_malformed\": true,\n" // this parameter is not supported by "keyword" field type - + " \"type\": \"keyword\"\n" - + " },\n" - + " \"path_match\": \"*\"\n" - + " }\n" - + " }\n" - + " ]\n" - + " }\n" - + "}}"; + // this parameter is not supported by "keyword" field type + String mapping = """ + { "dynamic_templates": [ + { + "my_template": { + "mapping": { + "ignore_malformed": true, + "type": "keyword" + }, + "path_match": "*" + } + } + ] + } + }}"""; String indexName = "malformed_dynamic_template"; assertAcked( prepareCreate(indexName).setSettings( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index e4a11db656400..55ff39715faed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.Priority; @@ -120,25 +120,22 @@ public void testUpdateMappingWithoutType() { .indices() .prepareCreate("test") .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) - .setMapping("{\"properties\":{\"body\":{\"type\":\"text\"}}}") + .setMapping(""" + {"properties":{"body":{"type":"text"}}} + """) .execute() .actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - AcknowledgedResponse putMappingResponse = client().admin() - .indices() - .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) - .execute() - .actionGet(); + AcknowledgedResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setSource(""" + {"properties":{"date":{"type":"integer"}}} + """, XContentType.JSON).execute().actionGet(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet(); - assertThat( - getMappingsResponse.mappings().get("test").source().toString(), - equalTo("{\"_doc\":{\"properties\":{\"body\":{\"type\":\"text\"},\"date\":{\"type\":\"integer\"}}}}") - ); + assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" + {"_doc":{"properties":{"body":{"type":"text"},"date":{"type":"integer"}}}}""")); } public void testUpdateMappingWithoutTypeMultiObjects() { @@ -150,20 +147,14 @@ public void testUpdateMappingWithoutTypeMultiObjects() { .actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - AcknowledgedResponse putMappingResponse = client().admin() - .indices() - .preparePutMapping("test") - 
.setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) - .execute() - .actionGet(); + AcknowledgedResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setSource(""" + {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).execute().actionGet(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet(); - assertThat( - getMappingsResponse.mappings().get("test").source().toString(), - equalTo("{\"_doc\":{\"properties\":{\"date\":{\"type\":\"integer\"}}}}") - ); + assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" + {"_doc":{"properties":{"date":{"type":"integer"}}}}""")); } public void testUpdateMappingWithConflicts() { @@ -171,18 +162,17 @@ public void testUpdateMappingWithConflicts() { .indices() .prepareCreate("test") .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)) - .setMapping("{\"properties\":{\"body\":{\"type\":\"text\"}}}") + .setMapping(""" + {"properties":{"body":{"type":"text"}}} + """) .execute() .actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); try { - client().admin() - .indices() - .preparePutMapping("test") - .setSource("{\"_doc\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}", XContentType.JSON) - .execute() - .actionGet(); + client().admin().indices().preparePutMapping("test").setSource(""" + {"_doc":{"properties":{"body":{"type":"integer"}}}} + """, XContentType.JSON).execute().actionGet(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [body] cannot be changed from type [text] to [integer]")); @@ -190,19 +180,13 @@ public void testUpdateMappingWithConflicts() { } public void testUpdateMappingWithNormsConflicts() { - client().admin() - .indices() - .prepareCreate("test") - .setMapping("{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": false }}}") - .execute() - .actionGet(); + client().admin().indices().prepareCreate("test").setMapping(""" + {"properties":{"body":{"type":"text", "norms": false }}} + """).execute().actionGet(); try { - client().admin() - .indices() - .preparePutMapping("test") - .setSource("{\"_doc\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": true }}}}", XContentType.JSON) - .execute() - .actionGet(); + client().admin().indices().preparePutMapping("test").setSource(""" + {"_doc":{"properties":{"body":{"type":"text", "norms": true }}}} + """, XContentType.JSON).execute().actionGet(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Cannot update parameter [norms] from [false] to [true]")); @@ -217,17 +201,15 @@ public void testUpdateMappingNoChanges() { .indices() .prepareCreate("test") .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)) - .setMapping("{\"properties\":{\"body\":{\"type\":\"text\"}}}") + .setMapping(""" + {"properties":{"body":{"type":"text"}}}""") .execute() .actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - AcknowledgedResponse putMappingResponse = client().admin() - .indices() - .preparePutMapping("test") - 
.setSource("{\"_doc\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}", XContentType.JSON) - .execute() - .actionGet(); + AcknowledgedResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setSource(""" + {"_doc":{"properties":{"body":{"type":"text"}}}} + """, XContentType.JSON).execute().actionGet(); // no changes, we return assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); @@ -311,12 +293,9 @@ public void testPutMappingsWithBlocks() { for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { try { enableIndexBlock("test", block); - assertAcked( - client().admin() - .indices() - .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) - ); + assertAcked(client().admin().indices().preparePutMapping("test").setSource(""" + {"properties":{"date":{"type":"integer"}}} + """, XContentType.JSON)); } finally { disableIndexBlock("test", block); } @@ -325,12 +304,9 @@ public void testPutMappingsWithBlocks() { for (String block : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", block); - assertBlocked( - client().admin() - .indices() - .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) - ); + assertBlocked(client().admin().indices().preparePutMapping("test").setSource(""" + {"properties":{"date":{"type":"integer"}}} + """, XContentType.JSON)); } finally { disableIndexBlock("test", block); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 2fe8f4301ef22..f9f17d8e1ebbf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.indices.memory.breaker; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.sort.SortOrder; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 936196f3ef411..4f2b41788883f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -18,8 +18,8 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -166,13 +166,22 @@ public void testRamAccountingTermsEnum() throws Exception { final 
Client client = client(); // Create an index where the mappings have a field data filter - assertAcked( - prepareCreate("ramtest").setSource( - "{\"mappings\": {\"type\": {\"properties\": {\"test\": " - + "{\"type\": \"text\",\"fielddata\": true,\"fielddata_frequency_filter\": {\"max\": 10000}}}}}}", - XContentType.JSON - ) - ); + assertAcked(prepareCreate("ramtest").setSource(""" + { + "mappings": { + "type": { + "properties": { + "test": { + "type": "text", + "fielddata": true, + "fielddata_frequency_filter": { + "max": 10000 + } + } + } + } + } + }""", XContentType.JSON)); ensureGreen("ramtest"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index 3d29e57ed78a0..a847c6e848e29 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -684,16 +684,13 @@ static void assertIndexIsOpened(final String... indices) { static void assertException(final Throwable throwable, final String indexName) { final Throwable t = ExceptionsHelper.unwrapCause(throwable); - if (t instanceof ClusterBlockException) { - ClusterBlockException clusterBlockException = (ClusterBlockException) t; + if (t instanceof ClusterBlockException clusterBlockException) { assertThat(clusterBlockException.blocks(), hasSize(1)); assertTrue(clusterBlockException.blocks().stream().allMatch(b -> b.id() == MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID)); - } else if (t instanceof IndexClosedException) { - IndexClosedException indexClosedException = (IndexClosedException) t; + } else if (t instanceof IndexClosedException indexClosedException) { assertThat(indexClosedException.getIndex(), notNullValue()); assertThat(indexClosedException.getIndex().getName(), equalTo(indexName)); - } else if (t instanceof IndexNotFoundException) { - IndexNotFoundException indexNotFoundException = (IndexNotFoundException) t; + } else if (t instanceof IndexNotFoundException indexNotFoundException) { assertThat(indexNotFoundException.getIndex(), notNullValue()); assertThat(indexNotFoundException.getIndex().getName(), equalTo(indexName)); } else { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index b88091cd85707..601daff94e3de 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -85,11 +85,11 @@ public void testCloseWhileRelocatingShards() throws Exception { final String indexName = "index-" + i; int nbDocs = 0; switch (i) { - case 0: + case 0 -> { logger.debug("creating empty index {}", indexName); createIndex(indexName); - break; - case 1: + } + case 1 -> { nbDocs = 
scaledRandomIntBetween(1, 100); logger.debug("creating index {} with {} documents", indexName, nbDocs); createIndex(indexName); @@ -99,13 +99,14 @@ public void testCloseWhileRelocatingShards() throws Exception { .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) .collect(Collectors.toList()) ); - break; - default: + } + default -> { logger.debug("creating index {} with background indexing", indexName); final BackgroundIndexer indexer = new BackgroundIndexer(indexName, client(), -1, 1); indexers.put(indexName, indexer); indexer.setFailureAssertion(t -> assertException(t, indexName)); waitForDocs(1, indexer); + } } docsPerIndex.put(indexName, (long) nbDocs); indices[i] = indexName; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 926dd772d5153..909fb0add9a10 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -35,7 +35,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; import static org.elasticsearch.indices.state.CloseIndexIT.assertIndexIsClosed; import static org.elasticsearch.indices.state.CloseIndexIT.assertIndexIsOpened; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -352,7 +351,7 @@ public void testOpenCloseIndexWithBlocks() { assertIndexIsClosed("test"); // Opening an index is blocked - for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE, SETTING_BLOCKS_METADATA)) { + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); assertBlocked(client().admin().indices().prepareOpen("test")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index 38072f77d7c67..c907324a691cb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -135,8 +135,8 @@ private Releasable interceptVerifyShardBeforeCloseActions(final String indexPatt internalCluster().getInstance(TransportService.class, node.getName()), (connection, requestId, action, request, options) -> { if (action.startsWith(TransportVerifyShardBeforeCloseAction.NAME)) { - if (request instanceof TransportVerifyShardBeforeCloseAction.ShardRequest) { - final String index = ((TransportVerifyShardBeforeCloseAction.ShardRequest) 
request).shardId().getIndexName(); + if (request instanceof TransportVerifyShardBeforeCloseAction.ShardRequest shardRequest) { + final String index = shardRequest.shardId().getIndexName(); if (Glob.globMatch(indexPattern, index)) { logger.info("request {} intercepted for index {}", requestId, index); onIntercept.run(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 4ff8e0bd41c39..bd7e57996cc49 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.indices.stats; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -68,6 +66,7 @@ import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.BrokenBarrierException; @@ -822,12 +821,12 @@ public void testSegmentsStats() { assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards)); if (includeSegmentFileSizes) { assertThat(stats.getTotal().getSegments().getFiles().size(), greaterThan(0)); - for (ObjectObjectCursor<String, SegmentsStats.FileStats> cursor : stats.getTotal().getSegments().getFiles()) { - assertThat(cursor.value.getExt(), notNullValue()); - assertThat(cursor.value.getTotal(), greaterThan(0L)); - assertThat(cursor.value.getCount(), greaterThan(0L)); - assertThat(cursor.value.getMin(), greaterThan(0L)); - assertThat(cursor.value.getMax(), greaterThan(0L)); + for (Map.Entry<String, SegmentsStats.FileStats> cursor : stats.getTotal().getSegments().getFiles().entrySet()) { + assertThat(cursor.getValue().getExt(), notNullValue()); + assertThat(cursor.getValue().getTotal(), greaterThan(0L)); + assertThat(cursor.getValue().getCount(), greaterThan(0L)); + assertThat(cursor.getValue().getMin(), greaterThan(0L)); + assertThat(cursor.getValue().getMax(), greaterThan(0L)); } } } @@ -1002,15 +1001,31 @@ public void testMultiIndex() throws Exception { } public void testCompletionFieldsParam() throws Exception { - assertAcked( - prepareCreate("test1").setMapping( - "{ \"properties\": { \"bar\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}" - + ",\"baz\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}}}" - ) - ); + assertAcked(prepareCreate("test1").setMapping(""" + { + "properties": { + "bar": { + "type": "text", + "fields": { + "completion": { + "type": "completion" + } + } + }, + "baz": { + "type": "text", + "fields": { + "completion": { + "type": "completion" + } + } + } + } + }""")); ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + client().prepareIndex("test1").setId(Integer.toString(1)).setSource(""" + {"bar":"bar","baz":"baz"}""", XContentType.JSON).get(); refresh(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); @@ -1083,64 +1098,27 @@ public void testGroupsParam() throws Exception { private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean set) { switch (flag) { - case Docs: - builder.setDocs(set); - break; - case
FieldData: - builder.setFieldData(set); - break; - case QueryCache: - builder.setQueryCache(set); - break; - case Flush: - builder.setFlush(set); - break; - case Get: - builder.setGet(set); - break; - case Indexing: - builder.setIndexing(set); - break; - case Merge: - builder.setMerge(set); - break; - case Refresh: - builder.setRefresh(set); - break; - case Search: - builder.setSearch(set); - break; - case Store: - builder.setStore(set); - break; - case Warmer: - builder.setWarmer(set); - break; - case Completion: - builder.setCompletion(set); - break; - case Segments: - builder.setSegments(set); - break; - case Translog: - builder.setTranslog(set); - break; - case RequestCache: - builder.setRequestCache(set); - break; - case Recovery: - builder.setRecovery(set); - break; - case Bulk: - builder.setBulk(set); - break; - case Shards: + case Docs -> builder.setDocs(set); + case FieldData -> builder.setFieldData(set); + case QueryCache -> builder.setQueryCache(set); + case Flush -> builder.setFlush(set); + case Get -> builder.setGet(set); + case Indexing -> builder.setIndexing(set); + case Merge -> builder.setMerge(set); + case Refresh -> builder.setRefresh(set); + case Search -> builder.setSearch(set); + case Store -> builder.setStore(set); + case Warmer -> builder.setWarmer(set); + case Completion -> builder.setCompletion(set); + case Segments -> builder.setSegments(set); + case Translog -> builder.setTranslog(set); + case RequestCache -> builder.setRequestCache(set); + case Recovery -> builder.setRecovery(set); + case Bulk -> builder.setBulk(set); + case Shards -> // We don't actually expose shards in IndexStats, but this test fails if it isn't handled builder.request().flags().set(Flag.Shards, set); - break; - default: - fail("new flag? " + flag); - break; + default -> fail("new flag? 
" + flag); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java index 066e4dad481d2..29c38c07fcbd7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java @@ -22,42 +22,28 @@ public class ComposableTemplateIT extends ESIntegTestCase { // See: https://github.com/elastic/elasticsearch/issues/58643 public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { - ComponentTemplate ct = new ComponentTemplate( - new Template( - null, - new CompressedXContent( - "{\n" - + " \"dynamic\": false,\n" - + " \"properties\": {\n" - + " \"foo\": {\n" - + " \"type\": \"text\"\n" - + " }\n" - + " }\n" - + " }" - ), - null - ), - 3L, - Collections.singletonMap("eggplant", "potato") - ); + ComponentTemplate ct = new ComponentTemplate(new Template(null, new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "foo": { + "type": "text" + } + } + }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct)).get(); ComposableIndexTemplate cit = new ComposableIndexTemplate( Collections.singletonList("coleslaw"), - new Template( - null, - new CompressedXContent( - "{\n" - + " \"dynamic\": false,\n" - + " \"properties\": {\n" - + " \"foo\": {\n" - + " \"type\": \"keyword\"\n" - + " }\n" - + " }\n" - + " }" - ), - null - ), + new Template(null, new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "foo": { + "type": "keyword" + } + } + }"""), null), Collections.singletonList("my-ct"), 4L, 5L, @@ -71,42 +57,28 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { internalCluster().fullRestart(); ensureGreen(); - ComponentTemplate ct2 = new ComponentTemplate( - new Template( - null, - new CompressedXContent( - "{\n" - + " \"dynamic\": true,\n" - + " \"properties\": {\n" - + " \"foo\": {\n" - + " \"type\": \"keyword\"\n" - + " }\n" - + " }\n" - + " }" - ), - null - ), - 3L, - Collections.singletonMap("eggplant", "potato") - ); + ComponentTemplate ct2 = new ComponentTemplate(new Template(null, new CompressedXContent(""" + { + "dynamic": true, + "properties": { + "foo": { + "type": "keyword" + } + } + }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct2)).get(); ComposableIndexTemplate cit2 = new ComposableIndexTemplate( Collections.singletonList("coleslaw"), - new Template( - null, - new CompressedXContent( - "{\n" - + " \"dynamic\": true,\n" - + " \"properties\": {\n" - + " \"foo\": {\n" - + " \"type\": \"integer\"\n" - + " }\n" - + " }\n" - + " }" - ), - null - ), + new Template(null, new CompressedXContent(""" + { + "dynamic": true, + "properties": { + "foo": { + "type": "integer" + } + } + }"""), null), Collections.singletonList("my-ct"), 4L, 5L, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index d20113c7cf414..85e188b30c1a5 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -565,27 +565,19 @@ public void testIndexTemplateWithAliases() throws Exception { } public void testIndexTemplateWithAliasesInSource() { - client().admin() - .indices() - .preparePutTemplate("template_1") - .setSource( - new BytesArray( - "{\n" - + " \"index_patterns\" : \"*\",\n" - + " \"aliases\" : {\n" - + " \"my_alias\" : {\n" - + " \"filter\" : {\n" - + " \"term\" : {\n" - + " \"field\" : \"value2\"\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + "}" - ), - XContentType.JSON - ) - .get(); + client().admin().indices().preparePutTemplate("template_1").setSource(new BytesArray(""" + { + "index_patterns": "*", + "aliases": { + "my_alias": { + "filter": { + "term": { + "field": "value2" + } + } + } + } + }"""), XContentType.JSON).get(); assertAcked(prepareCreate("test_index")); ensureGreen(); @@ -607,24 +599,21 @@ public void testIndexTemplateWithAliasesInSource() { } public void testIndexTemplateWithAliasesSource() { - client().admin() - .indices() - .preparePutTemplate("template_1") - .setPatterns(Collections.singletonList("te*")) - .setAliases( - " {\n" - + " \"alias1\" : {},\n" - + " \"alias2\" : {\n" - + " \"filter\" : {\n" - + " \"term\" : {\n" - + " \"field\" : \"value2\"\n" - + " }\n" - + " }\n" - + " },\n" - + " \"alias3\" : { \"routing\" : \"1\" }" - + " }\n" - ) - .get(); + client().admin().indices().preparePutTemplate("template_1").setPatterns(Collections.singletonList("te*")).setAliases(""" + { + "alias1": {}, + "alias2": { + "filter": { + "term": { + "field": "value2" + } + } + }, + "alias3": { + "routing": "1" + } + } + """).get(); assertAcked(prepareCreate("test_index")); ensureGreen(); @@ -830,25 +819,19 @@ public void testCombineTemplates() throws Exception { // Now, a complete mapping with two separated templates is error // base template - client().admin() - .indices() - .preparePutTemplate("template_1") - .setPatterns(Collections.singletonList("*")) - .setSettings( - " {\n" - + " \"index\" : {\n" - + " \"analysis\" : {\n" - + " \"analyzer\" : {\n" - + " \"custom_1\" : {\n" - + " \"tokenizer\" : \"standard\"\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n", - XContentType.JSON - ) - .get(); + client().admin().indices().preparePutTemplate("template_1").setPatterns(Collections.singletonList("*")).setSettings(""" + { + "index": { + "analysis": { + "analyzer": { + "custom_1": { + "tokenizer": "standard" + } + } + } + } + } + """, XContentType.JSON).get(); // put template using custom_1 analyzer MapperParsingException e = expectThrows( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java index 6c26f64a79133..028a5920c0be0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; 
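The hunks above and below repeatedly apply the Java 16/17 language idioms enabled by this branch's jdk17 toolchain: pattern matching for instanceof, arrow-style switch without fall-through, and text blocks for inline JSON. A minimal sketch of the three idioms, outside any Elasticsearch API, with hypothetical names (describe, randomRange, MAPPING) chosen only for illustration:

public class Java17IdiomsSketch {
    // Pattern matching for instanceof: the type test, the cast, and the
    // local variable are folded into one condition, replacing the old
    // "instanceof check then explicit cast" two-step on the removed lines.
    static String describe(Object o) {
        if (o instanceof Integer i) {
            return "int " + i;
        } else if (o instanceof String s) {
            return "string of length " + s.length();
        }
        return "unknown";
    }

    // Arrow switch used as an expression: each case yields a value,
    // there is no break statement and no fall-through.
    static double[] randomRange(int kind) {
        return switch (kind) {
            case 0 -> new double[] { Double.NEGATIVE_INFINITY, 100 };
            case 1 -> new double[] { 100, Double.POSITIVE_INFINITY };
            default -> new double[] { 0, 100 };
        };
    }

    // Text block: multi-line JSON without escaped quotes or string
    // concatenation, as in the mapping snippets these tests now use.
    static final String MAPPING = """
        {
          "properties": {
            "test": { "type": "text" }
          }
        }""";
}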
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index a4e2e2034647c..992fca7b635c6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -20,7 +20,7 @@ import java.util.List; -import static org.elasticsearch.client.Requests.nodesInfoRequest; +import static org.elasticsearch.client.internal.Requests.nodesInfoRequest; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java index e94e1ca8b4228..13585756f397c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.operateAllIndices; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -17,6 +15,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.junit.After; +import java.util.Map; + import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -91,8 +91,8 @@ public void testCloseIndexDefaultBehaviour() throws Exception { } ClusterState state = client().admin().cluster().prepareState().get().getState(); - for (ObjectObjectCursor<String, IndexMetadata> indexMetadataObjectObjectCursor : state.getMetadata().indices()) { - assertEquals(IndexMetadata.State.CLOSE, indexMetadataObjectObjectCursor.value.getState()); + for (Map.Entry<String, IndexMetadata> indexMetadataEntry : state.getMetadata().indices().entrySet()) { + assertEquals(IndexMetadata.State.CLOSE, indexMetadataEntry.getValue().getState()); } } @@ -126,8 +126,8 @@ public void testOpenIndexDefaultBehaviour() throws Exception { } ClusterState state = client().admin().cluster().prepareState().get().getState(); - for (ObjectObjectCursor<String, IndexMetadata> indexMetadataObjectObjectCursor : state.getMetadata().indices()) { - assertEquals(IndexMetadata.State.OPEN, indexMetadataObjectObjectCursor.value.getState()); + for (Map.Entry<String, IndexMetadata> indexMetadataEntry : state.getMetadata().indices().entrySet()) { + assertEquals(IndexMetadata.State.OPEN, indexMetadataEntry.getValue().getState()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java index 9280ce3f24405..91afd101ae952 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; +import
org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 8fada58de5c4a..afa23673cdc6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -81,19 +81,19 @@ public void testPersistentActionFailure() throws Exception { .get() .getTasks() .get(0); - logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId()); + logger.info("Found running task with id {} and parent {}", firstRunningTask.id(), firstRunningTask.parentTaskId()); // Verifying parent - assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); - assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster")); + assertThat(firstRunningTask.parentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.parentTaskId().getNodeId(), equalTo("cluster")); logger.info("Failing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("fail").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("fail").setTargetTaskId(firstRunningTask.taskId()).get().getTasks().size(), equalTo(1) ); - logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId()); + logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.id()); assertBusy(() -> { // Wait for the task to disappear completely assertThat( @@ -118,11 +118,11 @@ public void testPersistentActionCompletion() throws Exception { .get() .getTasks() .get(0); - logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId()); + logger.info("Found running task with id {} and parent {}", firstRunningTask.id(), firstRunningTask.parentTaskId()); // Verifying parent and description - assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); - assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster")); - assertThat(firstRunningTask.getDescription(), equalTo("id=" + taskId)); + assertThat(firstRunningTask.parentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.parentTaskId().getNodeId(), equalTo("cluster")); + assertThat(firstRunningTask.description(), equalTo("id=" + taskId)); if (randomBoolean()) { logger.info("Simulating errant completion notification"); @@ -144,7 +144,7 @@ public void testPersistentActionCompletion() throws Exception { ); } - stopOrCancelTask(firstRunningTask.getTaskId()); + stopOrCancelTask(firstRunningTask.taskId()); } public void testPersistentActionWithNoAvailableNode() throws Exception { @@ -169,7 +169,7 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { .get(0); // Verifying the task runs on the new node - assertThat(taskInfo.getTaskId().getNodeId(), equalTo(newNodeId)); + assertThat(taskInfo.taskId().getNodeId(), equalTo(newNodeId)); internalCluster().stopRandomNode(settings -> 
"test".equals(settings.get("node.attr.test_attr"))); @@ -220,7 +220,7 @@ public void testPersistentActionWithNonClusterStateCondition() throws Exception .get(0); // Verifying the task can now be assigned - assertThat(taskInfo.getTaskId().getNodeId(), notNullValue()); + assertThat(taskInfo.taskId().getNodeId(), notNullValue()); // Remove the persistent task PlainActionFuture> removeFuture = new PlainActionFuture<>(); @@ -255,7 +255,7 @@ public void testPersistentActionStatusUpdate() throws Exception { // Complete the running task and make sure it finishes properly assertThat( new TestTasksRequestBuilder(client()).setOperation("update_status") - .setTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.taskId()) .get() .getTasks() .size(), @@ -296,7 +296,7 @@ public void testPersistentActionStatusUpdate() throws Exception { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(firstRunningTask.taskId()).get().getTasks().size(), equalTo(1) ); @@ -327,11 +327,11 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { logger.info("Completing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(firstRunningTask.taskId()).get().getTasks().size(), equalTo(1) ); - logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId()); + logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.id()); assertBusy(() -> { // Wait for the task to disappear completely assertThat( @@ -402,7 +402,7 @@ public void testUnassignRunningPersistentTask() throws Exception { .get() .getTasks() .get(0); - stopOrCancelTask(taskInfo.getTaskId()); + stopOrCancelTask(taskInfo.taskId()); } public void testAbortLocally() throws Exception { @@ -430,12 +430,12 @@ public void testAbortLocally() throws Exception { TestPersistentTasksExecutor.setNonClusterStateCondition(false); // Verifying parent - assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); - assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster")); + assertThat(firstRunningTask.parentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.parentTaskId().getNodeId(), equalTo("cluster")); assertThat( new TestTasksRequestBuilder(client()).setOperation("abort_locally") - .setTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.taskId()) .get() .getTasks() .size(), @@ -487,19 +487,22 @@ public void testAbortLocally() throws Exception { .get() .getTasks() .get(0); - stopOrCancelTask(taskInfo.getTaskId()); + stopOrCancelTask(taskInfo.taskId()); } private void stopOrCancelTask(TaskId taskId) { if (randomBoolean()) { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly - assertThat(new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(taskId).get().getTasks().size(), equalTo(1)); + assertThat( + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(taskId).get().getTasks().size(), + equalTo(1) + ); } 
else { logger.info("Cancelling the running task"); // Cancel the running task and make sure it finishes properly - assertThat(client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks().size(), equalTo(1)); + assertThat(client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get().getTasks().size(), equalTo(1)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java index 126c8d347f5f1..7273b5763ba93 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java @@ -8,10 +8,6 @@ package org.elasticsearch.recovery; -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.procedures.IntProcedure; - import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.util.English; import org.elasticsearch.action.ActionFuture; @@ -22,7 +18,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -260,14 +256,14 @@ public void testRelocationWhileIndexingRandom() throws Exception { for (int hit = 0; hit < indexer.totalIndexedDocs(); hit++) { hitIds[hit] = hit + 1; } - IntHashSet set = IntHashSet.from(hitIds); + Set<Integer> set = Arrays.stream(hitIds).boxed().collect(Collectors.toSet()); for (SearchHit hit : hits.getHits()) { int id = Integer.parseInt(hit.getId()); if (set.remove(id) == false) { logger.error("Extra id [{}]", id); } } - set.forEach((IntProcedure) value -> { logger.error("Missing id [{}]", value); }); + set.forEach(value -> logger.error("Missing id [{}]", value)); } assertThat(hits.getTotalHits().value, equalTo(indexer.totalIndexedDocs())); logger.info("--> DONE search test round {}", i + 1); @@ -741,8 +737,8 @@ public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Except private void assertActiveCopiesEstablishedPeerRecoveryRetentionLeases() throws Exception { assertBusy(() -> { - for (ObjectCursor<String> it : client().admin().cluster().prepareState().get().getState().metadata().indices().keys()) { - Map<ShardId, List<ShardStats>> byShardId = Stream.of(client().admin().indices().prepareStats(it.value).get().getShards()) + for (String index : client().admin().cluster().prepareState().get().getState().metadata().indices().keySet()) { + Map<ShardId, List<ShardStats>> byShardId = Stream.of(client().admin().indices().prepareStats(index).get().getShards()) .collect(Collectors.groupingBy(l -> l.getShardRouting().shardId())); for (List<ShardStats> shardStats : byShardId.values()) { Set<String> expectedLeaseIds = shardStats.stream() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java index 3061b0d1e7d06..41879c00c7ad3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java @@ -15,10 +15,10 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; -import
static org.elasticsearch.client.Requests.flushRequest; -import static org.elasticsearch.client.Requests.getRequest; -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.refreshRequest; +import static org.elasticsearch.client.internal.Requests.flushRequest; +import static org.elasticsearch.client.internal.Requests.getRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.refreshRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java index d3f4f7515f455..a5bb89670389c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 0de71d3f8f9c3..fc0636fb82ec1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -205,7 +205,7 @@ public void testResolveSearchRouting() { ); assertThat( - indexNameExpressionResolver.resolveSearchRoutingAllIndices(state.metadata(), "0,1,2,tw , ltw , lw"), + IndexNameExpressionResolver.resolveSearchRoutingAllIndices(state.metadata(), "0,1,2,tw , ltw , lw"), equalTo( newMap( "test1", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java index 7b8e2068f33f8..5ccde81a88b66 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index 996639aaef4e6..5b89072e9d507 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -24,7 +24,7 @@ import 
org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.IndexRouting; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java index 5e956d499c391..9f51754d6bd1f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java @@ -40,13 +40,9 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { } public void testBasics() { - assertAcked( - client().admin() - .cluster() - .preparePutStoredScript() - .setId("foobar") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + LANG + "\", \"source\": \"1\"} }"), XContentType.JSON) - ); + assertAcked(client().admin().cluster().preparePutStoredScript().setId("foobar").setContent(new BytesArray(""" + {"script": {"lang": "%s", "source": "1"} } + """.formatted(LANG)), XContentType.JSON)); String script = client().admin().cluster().prepareGetStoredScript("foobar").get().getSource().getSource(); assertNotNull(script); assertEquals("1", script); @@ -57,12 +53,9 @@ public void testBasics() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin() - .cluster() - .preparePutStoredScript() - .setId("id#") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + LANG + "\", \"source\": \"1\"} }"), XContentType.JSON) - .get() + () -> client().admin().cluster().preparePutStoredScript().setId("id#").setContent(new BytesArray(""" + {"script": {"lang": "%s", "source": "1"} } + """.formatted(LANG)), XContentType.JSON).get() ); assertEquals("Validation Failed: 1: id cannot contain '#' for stored script;", e.getMessage()); } @@ -70,15 +63,9 @@ public void testBasics() { public void testMaxScriptSize() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin() - .cluster() - .preparePutStoredScript() - .setId("foobar") - .setContent( - new BytesArray("{\"script\": { \"lang\": \"" + LANG + "\"," + " \"source\":\"0123456789abcdef\"} }"), - XContentType.JSON - ) - .get() + () -> client().admin().cluster().preparePutStoredScript().setId("foobar").setContent(new BytesArray(""" + {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ + """.formatted(LANG)), XContentType.JSON).get() ); assertEquals("exceeded max allowed stored script size in bytes [64] with size [65] for script [foobar]", e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 39b7e5df5f319..9a800c2656c45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -131,9 +131,13 @@ private void cancelSearch(String action) { TaskInfo searchTask = listTasksResponse.getTasks().get(0); logger.info("Cancelling search"); - CancelTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(searchTask.getTaskId()).get(); +
CancelTasksResponse cancelTasksResponse = client().admin() + .cluster() + .prepareCancelTasks() + .setTargetTaskId(searchTask.taskId()) + .get(); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); - assertThat(cancelTasksResponse.getTasks().get(0).getTaskId(), equalTo(searchTask.getTaskId())); + assertThat(cancelTasksResponse.getTasks().get(0).taskId(), equalTo(searchTask.taskId())); } private SearchResponse ensureSearchWasCancelled(ActionFuture<SearchResponse> searchResponse) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java index 09a803835ba89..d757392cbaec9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.search; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.CheckedBiConsumer; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index 42fe90cb43cd7..2496e39617c64 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.plugins.Plugin; @@ -139,19 +140,12 @@ public void testRandomRanges() throws Exception { final int numRanges = randomIntBetween(1, 20); final double[][] ranges = new double[numRanges][]; for (int i = 0; i < ranges.length; ++i) { - switch (randomInt(2)) { - case 0: - ranges[i] = new double[] { Double.NEGATIVE_INFINITY, randomInt(100) }; - break; - case 1: - ranges[i] = new double[] { randomInt(100), Double.POSITIVE_INFINITY }; - break; - case 2: - ranges[i] = new double[] { randomInt(100), randomInt(100) }; - break; - default: - throw new AssertionError(); - } + ranges[i] = switch (randomInt(2)) { + case 0 -> new double[] { Double.NEGATIVE_INFINITY, randomInt(100) }; + case 1 -> new double[] { randomInt(100), Double.POSITIVE_INFINITY }; + case 2 -> new double[] { randomInt(100), randomInt(100) }; + default -> throw new AssertionError(); + }; } RangeAggregationBuilder query = range("range").field("values"); @@ -182,7 +176,7 @@ public void testRandomRanges() throws Exception { Range range = resp.getAggregations().get("range"); List<? extends Bucket> buckets = range.getBuckets(); - HashMap<String, Bucket> bucketMap = new HashMap<>(buckets.size()); + Map<String, Bucket> bucketMap = Maps.newMapWithExpectedSize(buckets.size()); for (Bucket bucket : buckets) { bucketMap.put(bucket.getKeyAsString(), bucket); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java new file mode 100644 index 0000000000000..a94fa23530e68 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java @@ -0,0 +1,522 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.global.Global; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.metrics.CompensatedSum; +import org.elasticsearch.search.aggregations.metrics.Stats; +import org.elasticsearch.search.aggregations.metrics.Sum; +import org.elasticsearch.search.aggregations.pipeline.SimpleValue; +import org.elasticsearch.search.aggregations.timeseries.TimeSeries; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.global; +import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.AggregationBuilders.timeSeries; +import static org.elasticsearch.search.aggregations.AggregationBuilders.topHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + 
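+// (Reviewer aside, not part of the original patch: the shared test data below is
+// keyed as Map<dimensions, Map<timestamp, Map<metric, value>>>, i.e.
+// Map<Map<String, String>, Map<Long, Map<String, Double>>>. Each key is one set of
+// time-series dimensions and each inner map one series; the aggregation tests
+// assert bucket keys and doc counts against this expected model.)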
+@ESIntegTestCase.SuiteScopeTestCase +public class TimeSeriesAggregationsIT extends ESIntegTestCase { + + private static final Map<Map<String, String>, Map<Long, Map<String, Double>>> data = new HashMap<>(); + private static int numberOfDimensions; + private static int numberOfMetrics; + private static String[][] dimensions; + private static Long[] boundaries; + + @Override + public void setupSuiteScopeCluster() throws Exception { + int numberOfIndices = randomIntBetween(1, 3); + numberOfDimensions = randomIntBetween(1, 5); + numberOfMetrics = randomIntBetween(1, 10); + String[] routingKeys = randomSubsetOf( + randomIntBetween(1, numberOfDimensions), + IntStream.rangeClosed(0, numberOfDimensions - 1).boxed().toArray(Integer[]::new) + ).stream().map(k -> "dim_" + k).toArray(String[]::new); + dimensions = new String[numberOfDimensions][]; + int dimCardinality = 1; + for (int i = 0; i < dimensions.length; i++) { + dimensions[i] = randomUnique(() -> randomAlphaOfLength(10), randomIntBetween(1, 30 / numberOfMetrics)).toArray(new String[0]); + dimCardinality *= dimensions[i].length; + } + + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("properties"); + for (int i = 0; i < dimensions.length; i++) { + builder.startObject("dim_" + i); + builder.field("type", "keyword"); + builder.field("time_series_dimension", true); + builder.endObject(); + } + for (int i = 0; i < numberOfMetrics; i++) { + builder.startObject("metric_" + i); + builder.field("type", "double"); + builder.endObject(); + } + builder.endObject(); // properties + builder.endObject(); + String start = "2021-01-01T00:00:00Z"; + String end = "2022-01-01T00:00:00Z"; + long startMillis = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(start); + long endMillis = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(end); + Set<Long> possibleBoundaries = randomUnique(() -> randomLongBetween(startMillis + 1, endMillis - 1), numberOfIndices - 1); + possibleBoundaries.add(startMillis); + possibleBoundaries.add(endMillis); + boundaries = possibleBoundaries.stream().sorted().toArray(Long[]::new); + for (int i = 0; i < numberOfIndices; i++) { + assertAcked( + prepareCreate("index" + i).setSettings( + Settings.builder() + .put("mode", "time_series") + .put("routing_path", String.join(",", routingKeys)) + .put("index.number_of_shards", randomIntBetween(1, 10)) + .put("time_series.start_time", boundaries[i]) + .put("time_series.end_time", boundaries[i + 1]) + .build() + ).setMapping(builder).addAlias(new Alias("index")).get() + ); + } + + int numberOfDocs = randomIntBetween(dimCardinality, dimCardinality * 5); + logger.info( + "Dimensions: " + + numberOfDimensions + + " metrics: " + + numberOfMetrics + + " documents " + + numberOfDocs + + " cardinality " + + dimCardinality + ); + + List<IndexRequestBuilder> docs = new ArrayList<>(numberOfDocs); + for (int i = 0; i < numberOfDocs; i++) { + XContentBuilder docSource = XContentFactory.jsonBuilder(); + docSource.startObject(); + Map<String, String> key = new HashMap<>(); + for (int d = 0; d < numberOfDimensions; d++) { + String dim = randomFrom(dimensions[d]); + docSource.field("dim_" + d, dim); + key.put("dim_" + d, dim); + } + Map<String, Double> metrics = new HashMap<>(); + for (int m = 0; m < numberOfMetrics; m++) { + Double val = randomDoubleBetween(0.0, 10000.0, true); + docSource.field("metric_" + m, val); + metrics.put("metric_" + m, val); + } + Map<Long, Map<String, Double>> tsValues = data.get(key); + long timestamp; + if (tsValues == null) { + timestamp = randomLongBetween(startMillis, endMillis - 1); + tsValues = new HashMap<>(); + data.put(key, tsValues);
+ } else { + timestamp = randomValueOtherThanMany(tsValues::containsKey, () -> randomLongBetween(startMillis, endMillis - 1)); + } + tsValues.put(timestamp, metrics); + docSource.field("@timestamp", timestamp); + docSource.endObject(); + docs.add(client().prepareIndex("index" + findIndex(timestamp)).setOpType(DocWriteRequest.OpType.CREATE).setSource(docSource)); + } + indexRandom(true, false, docs); + } + + public void testStandAloneTimeSeriesAgg() { + SearchResponse response = client().prepareSearch("index").setSize(0).addAggregation(timeSeries("by_ts")).get(); + assertSearchResponse(response); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + TimeSeries timeSeries = aggregations.get("by_ts"); + assertThat( + timeSeries.getBuckets().stream().map(MultiBucketsAggregation.Bucket::getKey).collect(Collectors.toSet()), + equalTo(data.keySet()) + ); + for (TimeSeries.Bucket bucket : timeSeries.getBuckets()) { + @SuppressWarnings("unchecked") + Map<String, String> key = (Map<String, String>) bucket.getKey(); + assertThat((long) data.get(key).size(), equalTo(bucket.getDocCount())); + } + } + + public void testTimeSeriesGroupedByADimension() { + String groupBy = "dim_" + randomIntBetween(0, numberOfDimensions - 1); + SearchResponse response = client().prepareSearch("index") + .setSize(0) + .addAggregation( + terms("by_dim").field(groupBy) + .size(data.size()) + .collectMode(randomFrom(Aggregator.SubAggCollectionMode.values())) + .subAggregation(timeSeries("by_ts")) + ) + .get(); + assertSearchResponse(response); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Terms terms = aggregations.get("by_dim"); + Set<Map<String, String>> keys = new HashSet<>(); + for (Terms.Bucket term : terms.getBuckets()) { + TimeSeries timeSeries = term.getAggregations().get("by_ts"); + for (TimeSeries.Bucket bucket : timeSeries.getBuckets()) { + @SuppressWarnings("unchecked") + Map<String, String> key = (Map<String, String>) bucket.getKey(); + assertThat((long) data.get(key).size(), equalTo(bucket.getDocCount())); + assertTrue("key is not unique", keys.add(key)); + assertThat("time series doesn't contain dimensions we grouped by", key.get(groupBy), equalTo(term.getKeyAsString())); + } + } + assertThat(keys, equalTo(data.keySet())); + } + + public void testTimeSeriesGroupedByDateHistogram() { + DateHistogramInterval fixedInterval = DateHistogramInterval.days(randomIntBetween(10, 100)); + SearchResponse response = client().prepareSearch("index") + .setSize(0) + .addAggregation( + dateHistogram("by_time").field("@timestamp") + .fixedInterval(fixedInterval) + .subAggregation(timeSeries("by_ts").subAggregation(stats("timestamp").field("@timestamp"))) + ) + .get(); + assertSearchResponse(response); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Histogram histogram = aggregations.get("by_time"); + Map<Map<String, String>, Long> keys = new HashMap<>(); + for (Histogram.Bucket interval : histogram.getBuckets()) { + long intervalStart = ((ZonedDateTime) interval.getKey()).toEpochSecond() * 1000; + long intervalEnd = intervalStart + fixedInterval.estimateMillis(); + TimeSeries timeSeries = interval.getAggregations().get("by_ts"); + for (TimeSeries.Bucket bucket : timeSeries.getBuckets()) { + @SuppressWarnings("unchecked") + Map<String, String> key = (Map<String, String>) bucket.getKey(); + keys.compute(key, (k, v) -> (v == null ?
0 : v) + bucket.getDocCount()); + assertThat(bucket.getDocCount(), lessThanOrEqualTo((long) data.get(key).size())); + Stats stats = bucket.getAggregations().get("timestamp"); + long minTimestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(stats.getMinAsString()); + long maxTimestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(stats.getMaxAsString()); + assertThat(minTimestamp, greaterThanOrEqualTo(intervalStart)); + assertThat(maxTimestamp, lessThan(intervalEnd)); + } + } + assertThat(keys.keySet(), equalTo(data.keySet())); + for (Map.Entry<Map<String, String>, Long> entry : keys.entrySet()) { + assertThat(entry.getValue(), equalTo((long) data.get(entry.getKey()).size())); + } + } + + public void testStandAloneTimeSeriesAggWithDimFilter() { + boolean include = randomBoolean(); + int dim = randomIntBetween(0, numberOfDimensions - 1); + String val = randomFrom(dimensions[dim]); + QueryBuilder queryBuilder = QueryBuilders.termQuery("dim_" + dim, val); + if (include == false) { + queryBuilder = QueryBuilders.boolQuery().mustNot(queryBuilder); + } + SearchResponse response = client().prepareSearch("index") + .setQuery(queryBuilder) + .setSize(0) + .addAggregation(timeSeries("by_ts")) + .get(); + assertSearchResponse(response); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + TimeSeries timeSeries = aggregations.get("by_ts"); + Map<Map<String, String>, Map<Long, Map<String, Double>>> filteredData = dataFilteredByDimension("dim_" + dim, val, include); + assertThat( + timeSeries.getBuckets().stream().map(MultiBucketsAggregation.Bucket::getKey).collect(Collectors.toSet()), + equalTo(filteredData.keySet()) + ); + for (TimeSeries.Bucket bucket : timeSeries.getBuckets()) { + @SuppressWarnings("unchecked") + Map<String, String> key = (Map<String, String>) bucket.getKey(); + assertThat(bucket.getDocCount(), equalTo((long) filteredData.get(key).size())); + } + } + + public void testStandAloneTimeSeriesAggWithGlobalAggregation() { + boolean include = randomBoolean(); + int dim = randomIntBetween(0, numberOfDimensions - 1); + int metric = randomIntBetween(0, numberOfMetrics - 1); + String val = randomFrom(dimensions[dim]); + QueryBuilder queryBuilder = QueryBuilders.termQuery("dim_" + dim, val); + if (include == false) { + queryBuilder = QueryBuilders.boolQuery().mustNot(queryBuilder); + } + SearchResponse response = client().prepareSearch("index") + .setQuery(queryBuilder) + .setSize(0) + .addAggregation(timeSeries("by_ts").subAggregation(sum("filter_sum").field("metric_" + metric))) + .addAggregation(global("everything").subAggregation(sum("all_sum").field("metric_" + metric))) + .addAggregation(PipelineAggregatorBuilders.sumBucket("total_filter_sum", "by_ts>filter_sum")) + .get(); + assertSearchResponse(response); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + TimeSeries timeSeries = aggregations.get("by_ts"); + Map<Map<String, String>, Map<Long, Map<String, Double>>> filteredData = dataFilteredByDimension("dim_" + dim, val, include); + assertThat( + timeSeries.getBuckets().stream().map(MultiBucketsAggregation.Bucket::getKey).collect(Collectors.toSet()), + equalTo(filteredData.keySet()) + ); + for (TimeSeries.Bucket bucket : timeSeries.getBuckets()) { + @SuppressWarnings("unchecked") + Map<String, String> key = (Map<String, String>) bucket.getKey(); + assertThat(bucket.getDocCount(), equalTo((long) filteredData.get(key).size())); + } + SimpleValue obj = aggregations.get("total_filter_sum"); + assertThat(obj.value(), closeTo(sumByMetric(filteredData, "metric_" + metric), obj.value() * 0.0001)); + + Global global = aggregations.get("everything"); + Sum allSum =
global.getAggregations().get("all_sum"); + assertThat(allSum.getValue(), closeTo(sumByMetric(data, "metric_" + metric), allSum.getValue() * 0.0001)); + + ElasticsearchException e = expectThrows( + ElasticsearchException.class, + () -> client().prepareSearch("index") + .setQuery(QueryBuilders.termQuery("dim_" + dim, val)) + .setSize(0) + .addAggregation(global("everything").subAggregation(timeSeries("by_ts"))) + .get() + ); + assertThat(e.getRootCause().getMessage(), containsString("Time series aggregations cannot be used inside global aggregation.")); + } + + public void testStandAloneTimeSeriesAggWithMetricFilter() { + boolean above = randomBoolean(); + int metric = randomIntBetween(0, numberOfMetrics - 1); + double val = randomDoubleBetween(0, 100000, true); + RangeQueryBuilder queryBuilder = QueryBuilders.rangeQuery("metric_" + metric); + if (above) { + queryBuilder.gt(val); + } else { + queryBuilder.lte(val); + } + SearchResponse response = client().prepareSearch("index") + .setQuery(queryBuilder) + .setSize(0) + .addAggregation(timeSeries("by_ts")) + .get(); + assertSearchResponse(response); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + TimeSeries timeSeries = aggregations.get("by_ts"); + Map<Map<String, String>, Map<Long, Map<String, Double>>> filteredData = dataFilteredByMetric(data, "metric_" + metric, val, above); + assertThat( + timeSeries.getBuckets().stream().map(MultiBucketsAggregation.Bucket::getKey).collect(Collectors.toSet()), + equalTo(filteredData.keySet()) + ); + for (TimeSeries.Bucket bucket : timeSeries.getBuckets()) { + @SuppressWarnings("unchecked") + Map<String, String> key = (Map<String, String>) bucket.getKey(); + assertThat(bucket.getDocCount(), equalTo((long) filteredData.get(key).size())); + } + } + + public void testRetrievingHits() { + Map.Entry<String, Double> filterMetric = randomMetricAndValue(data); + double lowerVal = filterMetric.getValue() - randomDoubleBetween(0, 100000, true); + double upperVal = filterMetric.getValue() + randomDoubleBetween(0, 100000, true); + Map<Map<String, String>, Map<Long, Map<String, Double>>> filteredData = dataFilteredByMetric( + dataFilteredByMetric(data, filterMetric.getKey(), upperVal, false), + filterMetric.getKey(), + lowerVal, + true + ); + QueryBuilder queryBuilder = QueryBuilders.rangeQuery(filterMetric.getKey()).gt(lowerVal).lte(upperVal); + int expectedSize = count(filteredData); + ElasticsearchException e = expectThrows( + ElasticsearchException.class, + () -> client().prepareSearch("index") + .setQuery(queryBuilder) + .setSize(expectedSize * 2) + .addAggregation(timeSeries("by_ts").subAggregation(topHits("hits").size(100))) + .addAggregation(topHits("top_hits").size(100)) // top level top hits + .get() + ); + assertThat(e.getDetailedMessage(), containsString("Top hits aggregations cannot be used together with time series aggregations")); + // TODO: Fix the top hits aggregation + } + + /** + * Filters the test data by only including or excluding certain results + * @param dimension name of the dimension to be filtered + * @param value value of the dimension to filter on + * @param include true if all records with this dimension should be included, false otherwise + * @return filtered map + */ + private static Map<Map<String, String>, Map<Long, Map<String, Double>>> dataFilteredByDimension( + String dimension, + String value, + boolean include + ) { + Map<Map<String, String>, Map<Long, Map<String, Double>>> newMap = new HashMap<>(); + for (Map.Entry<Map<String, String>, Map<Long, Map<String, Double>>> entry : data.entrySet()) { + if (value.equals(entry.getKey().get(dimension)) == include) { + newMap.put(entry.getKey(), entry.getValue()); + } + } + return newMap; + } + + /** + * Filters the test data by only including or excluding certain
results + * @param data data to be filtered + * @param metric name of the metric the records should be filtered by + * @param value value of the metric + * @param above true if all records above the value should be included, false otherwise + * @return filtered map + */ + private static Map<Map<String, String>, Map<Long, Map<String, Double>>> dataFilteredByMetric( + Map<Map<String, String>, Map<Long, Map<String, Double>>> data, + String metric, + double value, + boolean above + ) { + Map<Map<String, String>, Map<Long, Map<String, Double>>> newMap = new HashMap<>(); + for (Map.Entry<Map<String, String>, Map<Long, Map<String, Double>>> entry : data.entrySet()) { + Map<Long, Map<String, Double>> values = new HashMap<>(); + for (Map.Entry<Long, Map<String, Double>> doc : entry.getValue().entrySet()) { + Double docVal = doc.getValue().get(metric); + if (docVal != null && (docVal > value == above)) { + values.put(doc.getKey(), doc.getValue()); + } + } + if (values.isEmpty() == false) { + newMap.put(entry.getKey(), values); + } + } + return newMap; + } + + private static Double sumByMetric(Map<Map<String, String>, Map<Long, Map<String, Double>>> data, String metric) { + final CompensatedSum kahanSummation = new CompensatedSum(0, 0); + for (Map.Entry<Map<String, String>, Map<Long, Map<String, Double>>> entry : data.entrySet()) { + for (Map.Entry<Long, Map<String, Double>> doc : entry.getValue().entrySet()) { + Double docVal = doc.getValue().get(metric); + if (docVal != null) { + kahanSummation.add(docVal); + } + } + } + return kahanSummation.value(); + } + + private static int count(Map<Map<String, String>, Map<Long, Map<String, Double>>> data) { + int size = 0; + for (Map.Entry<Map<String, String>, Map<Long, Map<String, Double>>> entry : data.entrySet()) { + size += entry.getValue().entrySet().size(); + } + return size; + } + + private static int findIndex(long timestamp) { + for (int i = 0; i < boundaries.length - 1; i++) { + if (timestamp < boundaries[i + 1]) { + return i; + } + } + throw new IllegalArgumentException("Cannot find index for timestamp " + timestamp); + } + + private static Map.Entry<String, Double> randomMetricAndValue(Map<Map<String, String>, Map<Long, Map<String, Double>>> data) { + return randomFrom( + randomFrom(randomFrom(data.entrySet().stream().toList()).getValue().entrySet().stream().toList()).getValue() + .entrySet() + .stream() + .toList() + ); + } + + public void testGetHitsFailure() throws Exception { + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put("mode", "time_series") + .put("routing_path", "key") + .put("time_series.start_time", "2021-01-01T00:00:00Z") + .put("time_series.end_time", "2022-01-01T00:00:00Z") + .put("number_of_shards", 1) + .build() + ).setMapping("key", "type=keyword,time_series_dimension=true", "val", "type=double").get() + ); + + client().prepareBulk() + .add(client().prepareIndex("test").setId("2").setSource("key", "bar", "val", 2, "@timestamp", "2021-01-01T00:00:10Z")) + .add(client().prepareIndex("test").setId("1").setSource("key", "bar", "val", 10, "@timestamp", "2021-01-01T00:00:00Z")) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareBulk() + .add(client().prepareIndex("test").setId("4").setSource("key", "bar", "val", 50, "@timestamp", "2021-01-01T00:00:30Z")) + .add(client().prepareIndex("test").setId("3").setSource("key", "bar", "val", 40, "@timestamp", "2021-01-01T00:00:20Z")) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareBulk() + .add(client().prepareIndex("test").setId("7").setSource("key", "foo", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) + .add(client().prepareIndex("test").setId("8").setSource("key", "foo", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) + .add(client().prepareIndex("test").setId("5").setSource("key", "baz", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) + .add(client().prepareIndex("test").setId("6").setSource("key", "baz", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) +
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + QueryBuilder queryBuilder = QueryBuilders.rangeQuery("@timestamp").lte("2021-01-01T00:10:00Z"); + SearchResponse response = client().prepareSearch("test") + .setQuery(queryBuilder) + .setSize(10) + .addSort("key", SortOrder.ASC) + .addSort("@timestamp", SortOrder.ASC) + .get(); + assertSearchResponse(response); + + response = client().prepareSearch("test").setQuery(queryBuilder).setSize(10).addAggregation(timeSeries("by_ts")).get(); + assertSearchResponse(response); + + } + +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java index 1a95ec98b0087..45750663c3346 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java @@ -8,14 +8,13 @@ package org.elasticsearch.search.aggregations.bucket; +import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrix; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrix.Bucket; @@ -276,20 +275,28 @@ public void testWithSubAggregation() throws Exception { } - public void testTooLargeMatrix() throws Exception { + public void testTooLargeMatrix() { - // Create more filters than is permitted by Lucene Bool clause settings. - MapBuilder<String, QueryBuilder> filtersMap = new MapBuilder<>(); - int maxFilters = SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.get(Settings.EMPTY); - for (int i = 0; i <= maxFilters; i++) { - filtersMap.add("tag" + i, termQuery("tag", "tag" + i)); - } + int originalMaxClauses = IndexSearcher.getMaxClauseCount(); try { - client().prepareSearch("idx").addAggregation(adjacencyMatrix("tags", "\t", filtersMap)).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getCause().getMessage(), containsString("Number of filters is too large")); + // Create more filters than is permitted by Lucene Bool clause settings.
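Editorial aside on the testTooLargeMatrix rewrite that this hunk is part of: instead of building more filters than the node-wide indices.query.bool.max_clause_count limit permits, the new version temporarily lowers Lucene's JVM-global clause limit so the failure is cheap to trigger, then restores it. A minimal sketch of that save/lower/restore idiom, assuming only Lucene's public IndexSearcher API (the helper class and Runnable shape are illustrative, not part of this change):

import org.apache.lucene.search.IndexSearcher;

final class ClauseLimitFixture {
    // Runs a body with a temporarily lowered max clause count and always restores the
    // original value, so the JVM-global setting cannot leak into other tests.
    static void withMaxClauseCount(int lowered, Runnable body) {
        int original = IndexSearcher.getMaxClauseCount();
        IndexSearcher.setMaxClauseCount(lowered);
        try {
            body.run();
        } finally {
            IndexSearcher.setMaxClauseCount(original);
        }
    }
}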
+ MapBuilder<String, QueryBuilder> filtersMap = new MapBuilder<>(); + int maxFilters = randomIntBetween(50, 100); + IndexSearcher.setMaxClauseCount(maxFilters); + for (int i = 0; i <= maxFilters; i++) { + filtersMap.add("tag" + i, termQuery("tag", "tag" + i)); + } + + try { + client().prepareSearch("idx").addAggregation(adjacencyMatrix("tags", "\t", filtersMap)).get(); + fail("SearchPhaseExecutionException should have been thrown"); + } catch (SearchPhaseExecutionException ex) { + assertThat(ex.getCause().getMessage(), containsString("Number of filters is too large")); + } + + } finally { + IndexSearcher.setMaxClauseCount(originalMaxClauses); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index bc0cb683780ec..103c03564e5a6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -43,24 +43,21 @@ public void setupSuiteScopeCluster() throws Exception { } final boolean[] multiValue; switch (randomInt(3)) { - case 0: - multiValue = new boolean[0]; - break; - case 1: + case 0 -> multiValue = new boolean[0]; + case 1 -> { numMultiFalses++; multiValue = new boolean[] { false }; - break; - case 2: + } + case 2 -> { numMultiTrues++; multiValue = new boolean[] { true }; - break; - case 3: + } + case 3 -> { numMultiFalses++; numMultiTrues++; multiValue = new boolean[] { false, true }; - break; - default: - throw new AssertionError(); + } + default -> throw new AssertionError(); } builders[i] = client().prepareIndex("idx") .setSource( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index cb2bf993a6d2c..27344b94fcaf3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -623,8 +623,7 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { ElasticsearchException[] rootCauses = e.guessRootCauses(); if (rootCauses.length == 1) { ElasticsearchException rootCause = rootCauses[0]; - if (rootCause instanceof AggregationExecutionException) { - AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + if (rootCause instanceof AggregationExecutionException aggException) { assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); } else { throw e; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index bdbe95160bee1..8f9f544beac35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -669,8 +669,7 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { ElasticsearchException[] rootCauses = e.guessRootCauses(); if (rootCauses.length == 1) { ElasticsearchException rootCause = rootCauses[0]; - if (rootCause instanceof
AggregationExecutionException) { - AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + if (rootCause instanceof AggregationExecutionException aggException) { assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); } else { throw e; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 7441378c2aa4a..7632bac9739e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -432,26 +432,50 @@ public void testParentFilterResolvedCorrectly() throws Exception { ensureGreen("idx2"); List<IndexRequestBuilder> indexRequests = new ArrayList<>(2); - indexRequests.add( - client().prepareIndex("idx2") - .setId("1") - .setSource( - "{\"dates\": {\"month\": {\"label\": \"2014-11\", \"end\": \"2014-11-30\", \"start\": \"2014-11-01\"}, " - + "\"day\": \"2014-11-30\"}, \"comments\": [{\"cid\": 3,\"identifier\": \"29111\"}, {\"cid\": 4,\"tags\": [" - + "{\"tid\" :44,\"name\": \"Roles\"}], \"identifier\": \"29101\"}]}", - XContentType.JSON - ) - ); - indexRequests.add( - client().prepareIndex("idx2") - .setId("2") - .setSource( - "{\"dates\": {\"month\": {\"label\": \"2014-12\", \"end\": \"2014-12-31\", \"start\": \"2014-12-01\"}, " - + "\"day\": \"2014-12-03\"}, \"comments\": [{\"cid\": 1, \"identifier\": \"29111\"}, {\"cid\": 2,\"tags\": [" - + "{\"tid\" : 22, \"name\": \"DataChannels\"}], \"identifier\": \"29101\"}]}", - XContentType.JSON - ) - ); + indexRequests.add(client().prepareIndex("idx2").setId("1").setSource(""" + { + "dates": { + "month": { + "label": "2014-11", + "end": "2014-11-30", + "start": "2014-11-01" + }, + "day": "2014-11-30" + }, + "comments": [ + { + "cid": 3, + "identifier": "29111" + }, + { + "cid": 4, + "tags": [ { "tid": 44, "name": "Roles" } ], + "identifier": "29101" + } + ] + }""", XContentType.JSON)); + indexRequests.add(client().prepareIndex("idx2").setId("2").setSource(""" + { + "dates": { + "month": { + "label": "2014-12", + "end": "2014-12-31", + "start": "2014-12-01" + }, + "day": "2014-12-03" + }, + "comments": [ + { + "cid": 1, + "identifier": "29111" + }, + { + "cid": 2, + "tags": [ { "tid": 22, "name": "DataChannels" } ], + "identifier": "29101" + } + ] + }""", XContentType.JSON)); indexRandom(true, indexRequests); SearchResponse response = client().prepareSearch("idx2") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index adc5651cc728a..ce85fc300716e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -149,40 +150,49 @@ public void
testXContentResponse() throws Exception { classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS); responseBuilder.endObject(); - String result = "{\"class\":{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0," - + "\"buckets\":[" - + "{" - + "\"key\":\"0\"," - + "\"doc_count\":4," - + "\"sig_terms\":{" - + "\"doc_count\":4," - + "\"bg_count\":7," - + "\"buckets\":[" - + "{" - + "\"key\":" - + (type.equals("long") ? "0," : "\"0\",") - + "\"doc_count\":4," - + "\"score\":0.39999999999999997," - + "\"bg_count\":5" - + "}" - + "]" - + "}" - + "}," - + "{" - + "\"key\":\"1\"," - + "\"doc_count\":3," - + "\"sig_terms\":{" - + "\"doc_count\":3," - + "\"bg_count\":7," - + "\"buckets\":[" - + "{" - + "\"key\":" - + (type.equals("long") ? "1," : "\"1\",") - + "\"doc_count\":3," - + "\"score\":0.75," - + "\"bg_count\":4" - + "}]}}]}}"; - assertThat(Strings.toString(responseBuilder), equalTo(result)); + String result = """ + { + "class": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + { + "key": "0", + "doc_count": 4, + "sig_terms": { + "doc_count": 4, + "bg_count": 7, + "buckets": [ + { + "key": %s, + "doc_count": 4, + "score": 0.39999999999999997, + "bg_count": 5 + } + ] + } + }, + { + "key": "1", + "doc_count": 3, + "sig_terms": { + "doc_count": 3, + "bg_count": 7, + "buckets": [ + { + "key":%s, + "doc_count": 3, + "score": 0.75, + "bg_count": 4 + } + ] + } + } + ] + } + } + """.formatted(type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\""); + assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index 3735537c3fa36..fa8fa7d9c90a3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -646,8 +646,7 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { ElasticsearchException[] rootCauses = e.guessRootCauses(); if (rootCauses.length == 1) { ElasticsearchException rootCause = rootCauses[0]; - if (rootCause instanceof AggregationExecutionException) { - AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + if (rootCause instanceof AggregationExecutionException aggException) { assertThat(aggException.getMessage(), startsWith("Invalid aggregation order path")); } else { throw e; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java deleted file mode 100644 index 6777071b749fa..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java +++ /dev/null @@ -1,597 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.search.aggregations.metrics; - -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.fielddata.ScriptDocValues; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.bucket.global.Global; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -import static java.util.Collections.emptyMap; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality; -import static org.elasticsearch.search.aggregations.AggregationBuilders.global; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class CardinalityIT extends ESIntegTestCase { - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Collections.singleton(CustomScriptPlugin.class); - } - - public static class CustomScriptPlugin extends MockScriptPlugin { - - @Override - protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { - Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); - - scripts.put("_value", vars -> vars.get("_value")); - - scripts.put("doc['str_value'].value", vars -> { - Map<String, Object> doc = (Map<String, Object>) vars.get("doc"); - return doc.get("str_value"); - }); - - scripts.put("doc['str_values']", vars -> { - Map<String, Object> doc = (Map<String, Object>) vars.get("doc"); - ScriptDocValues.Strings strValue = (ScriptDocValues.Strings) doc.get("str_values"); - return strValue; - }); - - scripts.put("doc[' + singleNumericField() + '].value", vars -> { - Map<String, Object> doc = (Map<String, Object>) vars.get("doc"); - return doc.get(singleNumericField()); - }); - - scripts.put("doc[' + multiNumericField(false) + ']", vars -> { - Map<String, Object> doc = (Map<String, Object>) vars.get("doc"); - return (ScriptDocValues) doc.get(multiNumericField(false)); - }); - - return scripts; - } - - @Override - protected Map<String, Function<Map<String, Object>, Object>> nonDeterministicPluginScripts() { - Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); - - scripts.put("Math.random()", vars -> CardinalityIT.randomDouble()); - - return scripts; - } - } - - @Override - public Settings indexSettings() { - return Settings.builder() - .put("index.number_of_shards", numberOfShards()) - .put("index.number_of_replicas", numberOfReplicas()) - .build(); - } - - static long numDocs; - static long precisionThreshold; - - @Override - public void setupSuiteScopeCluster() throws Exception { - - prepareCreate("idx").setMapping( - jsonBuilder().startObject() - .startObject("_doc") - .startObject("properties") - .startObject("str_value") - .field("type", "keyword") - .endObject() -
.startObject("str_values") - .field("type", "keyword") - .endObject() - .startObject("l_value") - .field("type", "long") - .endObject() - .startObject("l_values") - .field("type", "long") - .endObject() - .startObject("d_value") - .field("type", "double") - .endObject() - .startObject("d_values") - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endObject() - ).get(); - - numDocs = randomIntBetween(2, 100); - precisionThreshold = randomIntBetween(0, 1 << randomInt(20)); - IndexRequestBuilder[] builders = new IndexRequestBuilder[(int) numDocs]; - for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("str_value", "s" + i) - .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) - .field("l_value", i) - .array("l_values", new int[] { i * 2, i * 2 + 1 }) - .field("d_value", i) - .array("d_values", new double[] { i * 2, i * 2 + 1 }) - .endObject() - ); - } - indexRandom(true, builders); - createIndex("idx_unmapped"); - - IndexRequestBuilder[] dummyDocsBuilder = new IndexRequestBuilder[10]; - for (int i = 0; i < dummyDocsBuilder.length; i++) { - dummyDocsBuilder[i] = client().prepareIndex("idx").setSource("a_field", "1"); - } - indexRandom(true, dummyDocsBuilder); - - ensureSearchable(); - } - - private void assertCount(Cardinality count, long value) { - if (value <= precisionThreshold) { - // linear counting should be picked, and should be accurate - assertEquals(value, count.getValue()); - } else { - // error is not bound, so let's just make sure it is > 0 - assertThat(count.getValue(), greaterThan(0L)); - } - } - - private static String singleNumericField() { - return randomBoolean() ? "l_value" : "d_value"; - } - - private static String multiNumericField(boolean hash) { - return randomBoolean() ? 
"l_values" : "d_values"; - } - - public void testUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, 0); - } - - public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testSingleValuedString() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testSingleValuedNumeric() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testSingleValuedNumericGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()) - ) - ) - .get(); - - assertSearchResponse(searchResponse); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - // assertThat(global.getDocCount(), equalTo(numDocs)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Cardinality cardinality = global.getAggregations().get("cardinality"); - assertThat(cardinality, notNullValue()); - assertThat(cardinality.getName(), equalTo("cardinality")); - long expectedValue = numDocs; - assertCount(cardinality, expectedValue); - assertThat(((InternalAggregation) global).getProperty("cardinality"), equalTo(cardinality)); - assertThat(((InternalAggregation) global).getProperty("cardinality.value"), equalTo((double) cardinality.getValue())); - assertThat((double) ((InternalAggregation) cardinality).getProperty("value"), equalTo((double) cardinality.getValue())); - } - - public void testSingleValuedNumericHashed() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) - .get(); - - assertSearchResponse(response); - - Cardinality count = 
response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testMultiValuedString() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testMultiValuedNumeric() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false))) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testMultiValuedNumericHashed() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(true))) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testSingleValuedStringScript() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", emptyMap())) - ) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testMultiValuedStringScript() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_values']", emptyMap())) - ) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testSingleValuedNumericScript() throws Exception { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + singleNumericField() + '].value", emptyMap()); - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testMultiValuedNumericScript() throws Exception { - Script script = new Script( - ScriptType.INLINE, - CustomScriptPlugin.NAME, - "doc[' + multiNumericField(false) + ']", - Collections.emptyMap() - ); 
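A note on the mock scripts used throughout the deleted CardinalityIT (and in ScriptedMetricIT further down): with MockScriptPlugin the inline script "source" is never compiled; it is only a lookup key that pluginScripts() binds to a plain Java function. A minimal sketch of that contract, assuming the plugin is registered via nodePlugins() as these tests do:

import java.util.Map;
import java.util.function.Function;
import org.elasticsearch.script.MockScriptPlugin;

public class CustomScriptPlugin extends MockScriptPlugin {
    @Override
    protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
        // The map key must match the Script "source" string used by the test verbatim;
        // the bound function receives the script vars (e.g. "_value", "doc") at runtime.
        return Map.of("_value", vars -> vars.get("_value"));
    }
}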
- SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testSingleValuedStringValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold) - .field("str_value") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) - ) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testMultiValuedStringValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold) - .field("str_values") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) - ) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testSingleValuedNumericValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold) - .field(singleNumericField()) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) - ) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs); - } - - public void testMultiValuedNumericValueScript() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality").precisionThreshold(precisionThreshold) - .field(multiNumericField(false)) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) - ) - .get(); - - assertSearchResponse(response); - - Cardinality count = response.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, numDocs * 2); - } - - public void testAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("str_value") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - for (Terms.Bucket bucket : terms.getBuckets()) { - Cardinality count = bucket.getAggregations().get("cardinality"); - assertThat(count, notNullValue()); - assertThat(count.getName(), equalTo("cardinality")); - assertCount(count, 2); - } - } - - /** - * Make sure that a request using a deterministic script or not using a script get cached. 
- * Ensure requests using nondeterministic scripts do not get cached. - */ - public void testScriptCaching() throws Exception { - assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get() - ); - indexRandom( - true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) - ); - - // Make sure we are starting with a clear cache - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getHitCount(), - equalTo(0L) - ); - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getMissCount(), - equalTo(0L) - ); - - // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx") - .setSize(0) - .addAggregation( - cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertSearchResponse(r); - - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getHitCount(), - equalTo(0L) - ); - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getMissCount(), - equalTo(0L) - ); - - // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx") - .setSize(0) - .addAggregation( - cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) - ) - .get(); - assertSearchResponse(r); - - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getHitCount(), - equalTo(0L) - ); - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getMissCount(), - equalTo(1L) - ); - - // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(cardinality("foo").field("d")).get(); - assertSearchResponse(r); - - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getHitCount(), - equalTo(0L) - ); - assertThat( - client().admin() - .indices() - .prepareStats("cache_test_idx") - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache() - .getMissCount(), - equalTo(2L) - ); - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 46b6421086703..3e85caa8e86f3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -57,17 +57,11 @@ private static double[] randomPercents(long minValue, long maxValue) { final int length = randomIntBetween(1, 20); 
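The switch-expression rewrites in the percentile tests that follow preserve a deliberate sampling bias: randomInt(20) yields 21 outcomes, so each of the two boundary cases fires with probability 1/21, guaranteeing the exact minimum and maximum are exercised while the default arm samples the interior uniformly. A standalone sketch of the same generator, with java.util.Random standing in for the test framework's randomness:

import java.util.Random;

final class BoundaryBiasedSampler {
    // ~1/21 chance each for the exact lower and upper bounds, uniform interior otherwise.
    static double sample(Random random, long minValue, long maxValue) {
        return switch (random.nextInt(21)) { // randomInt(20) is inclusive, hence 21 outcomes
            case 0 -> minValue;
            case 1 -> maxValue;
            default -> random.nextDouble() * (maxValue - minValue) + minValue;
        };
    }
}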
final double[] percents = new double[length]; for (int i = 0; i < percents.length; ++i) { - switch (randomInt(20)) { - case 0: - percents[i] = minValue; - break; - case 1: - percents[i] = maxValue; - break; - default: - percents[i] = (randomDouble() * (maxValue - minValue)) + minValue; - break; - } + percents[i] = switch (randomInt(20)) { + case 0 -> minValue; + case 1 -> maxValue; + default -> (randomDouble() * (maxValue - minValue)) + minValue; + }; } Arrays.sort(percents); LogManager.getLogger(HDRPercentileRanksIT.class).info("Using values={}", Arrays.toString(percents)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 0a5d19b7af884..a486f553d2bf9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -61,15 +61,9 @@ private static double[] randomPercentiles() { final Set<Double> uniquedPercentiles = new HashSet<>(); while (uniquedPercentiles.size() < length) { switch (randomInt(20)) { - case 0: - uniquedPercentiles.add(0.0); - break; - case 1: - uniquedPercentiles.add(100.0); - break; - default: - uniquedPercentiles.add(randomDouble() * 100); - break; + case 0 -> uniquedPercentiles.add(0.0); + case 1 -> uniquedPercentiles.add(100.0); + default -> uniquedPercentiles.add(randomDouble() * 100); } } double[] percentiles = uniquedPercentiles.stream().mapToDouble(Double::doubleValue).sorted().toArray(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 032aaf3ba37eb..7201164ff0a24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -303,63 +303,21 @@ public void setupSuiteScopeCluster() throws Exception { // When using the MockScriptPlugin we can map Stored scripts to inline scripts: // the id of the stored script is used in test method while the source of the stored script // must match a predefined script from CustomScriptPlugin.pluginScripts() method - assertAcked( - client().admin() - .cluster() - .preparePutStoredScript() - .setId("initScript_stored") - .setContent( - new BytesArray( - "{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + " \"source\": \"vars.multiplier = 3\"} }" - ), - XContentType.JSON - ) - ); + assertAcked(client().admin().cluster().preparePutStoredScript().setId("initScript_stored").setContent(new BytesArray(""" + {"script": {"lang": "%s", "source": "vars.multiplier = 3"} } + """.formatted(MockScriptPlugin.NAME)), XContentType.JSON)); - assertAcked( - client().admin() - .cluster() - .preparePutStoredScript() - .setId("mapScript_stored") - .setContent( - new BytesArray( - "{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + " \"source\": \"state.list.add(vars.multiplier)\"} }" - ), - XContentType.JSON - ) - ); + assertAcked(client().admin().cluster().preparePutStoredScript().setId("mapScript_stored").setContent(new BytesArray(""" + {"script": {"lang": "%s", "source": "state.list.add(vars.multiplier)"} } + """.formatted(MockScriptPlugin.NAME)),
XContentType.JSON)); - assertAcked( - client().admin() - .cluster() - .preparePutStoredScript() - .setId("combineScript_stored") - .setContent( - new BytesArray( - "{\"script\": {\"lang\": \"" - + MockScriptPlugin.NAME - + "\"," - + " \"source\": \"sum state values as a new aggregation\"} }" - ), - XContentType.JSON - ) - ); + assertAcked(client().admin().cluster().preparePutStoredScript().setId("combineScript_stored").setContent(new BytesArray(""" + {"script": {"lang": "%s", "source": "sum state values as a new aggregation"} } + """.formatted(MockScriptPlugin.NAME)), XContentType.JSON)); - assertAcked( - client().admin() - .cluster() - .preparePutStoredScript() - .setId("reduceScript_stored") - .setContent( - new BytesArray( - "{\"script\": {\"lang\": \"" - + MockScriptPlugin.NAME - + "\"," - + " \"source\": \"sum all states (lists) values as a new aggregation\"} }" - ), - XContentType.JSON - ) - ); + assertAcked(client().admin().cluster().preparePutStoredScript().setId("reduceScript_stored").setContent(new BytesArray(""" + {"script": {"lang": "%s", "source": "sum all states (lists) values as a new aggregation"} } + """.formatted(MockScriptPlugin.NAME)), XContentType.JSON)); indexRandom(true, builders); ensureSearchable(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 7a8cb1fc0d1e4..ba8ba966a68e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -57,17 +57,11 @@ private static double[] randomPercents(long minValue, long maxValue) { final int length = randomIntBetween(1, 20); final double[] percents = new double[length]; for (int i = 0; i < percents.length; ++i) { - switch (randomInt(20)) { - case 0: - percents[i] = minValue; - break; - case 1: - percents[i] = maxValue; - break; - default: - percents[i] = (randomDouble() * (maxValue - minValue)) + minValue; - break; - } + percents[i] = switch (randomInt(20)) { + case 0 -> minValue; + case 1 -> maxValue; + default -> (randomDouble() * (maxValue - minValue)) + minValue; + }; } Arrays.sort(percents); LogManager.getLogger(TDigestPercentileRanksIT.class).info("Using values={}", Arrays.toString(percents)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index d1d89d7c02740..61f546230f9fd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -60,15 +60,9 @@ private static double[] randomPercentiles() { final Set<Double> uniquedPercentiles = new HashSet<>(); for (int i = 0; i < length; ++i) { switch (randomInt(20)) { - case 0: - uniquedPercentiles.add(0.0); - break; - case 1: - uniquedPercentiles.add(100.0); - break; - default: - uniquedPercentiles.add(randomDouble() * 100); - break; + case 0 -> uniquedPercentiles.add(0.0); + case 1 -> uniquedPercentiles.add(100.0); + default -> uniquedPercentiles.add(randomDouble() * 100); } } double[] percentiles =
uniquedPercentiles.stream().mapToDouble(Double::doubleValue).sorted().toArray(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java index ef5abbbedd3fe..3d50e727a85a6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java @@ -440,7 +440,7 @@ public void testFieldIsntWrittenOutTwice() throws Exception { // you need to add an additional index with no fields in order to trigger this (or potentially a shard) // so that there is an UnmappedTerms in the list to reduce. createIndex("foo_1"); - // @formatter:off + // tag::noformat XContentBuilder builder = jsonBuilder().startObject() .startObject("properties") .startObject("@timestamp") @@ -464,9 +464,9 @@ public void testFieldIsntWrittenOutTwice() throws Exception { .endObject() .endObject() .endObject(); - // @formatter:on + // end::noformat assertAcked(client().admin().indices().prepareCreate("foo_2").setMapping(builder).get()); - // @formatter:off + // tag::noformat XContentBuilder docBuilder = jsonBuilder().startObject() .startObject("license") .field("partnumber", "foobar") @@ -474,7 +474,7 @@ public void testFieldIsntWrittenOutTwice() throws Exception { .endObject() .field("@timestamp", "2018-07-08T08:07:00.599Z") .endObject(); - // @formatter:on + // end::noformat client().prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); client().admin().indices().prepareRefresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java index b0f55882d8e3a..ba777be5ff736 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -457,15 +457,13 @@ public void testStoredScript() { .preparePutStoredScript() .setId("my_script") // Source is not interpreted but my_script is defined in CustomScriptPlugin - .setContent( - new BytesArray( - "{ \"script\": { \"lang\": \"" - + CustomScriptPlugin.NAME - + "\", " - + "\"source\": \"Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)\" } }" - ), - XContentType.JSON - ) + .setContent(new BytesArray(""" + { + "script": { + "lang": "%s", + "source": "Double.isNaN(_value0) ? 
false : (_value0 + _value1 > 100)" + } + }""".formatted(CustomScriptPlugin.NAME)), XContentType.JSON) ); Script script = new Script(ScriptType.STORED, null, "my_script", Collections.emptyMap()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java index 6ba6c0b0a6879..10a3d9ce1ccf0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.collect.EvictingQueue; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -18,7 +19,6 @@ import org.hamcrest.Matchers; import java.util.ArrayList; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -69,16 +69,12 @@ public String toString() { private ValuesSourceAggregationBuilder<? extends ValuesSourceAggregationBuilder<?>> randomMetric(String name, String field) { int rand = randomIntBetween(0, 3); - switch (rand) { - case 0: - return min(name).field(field); - case 2: - return max(name).field(field); - case 3: - return avg(name).field(field); - default: - return avg(name).field(field); - } + return switch (rand) { + case 0 -> min(name).field(field); + case 2 -> max(name).field(field); + case 3 -> avg(name).field(field); + default -> avg(name).field(field); + }; } private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { @@ -135,7 +131,7 @@ public void setupSuiteScopeCluster() throws Exception { metric = randomMetric("the_metric", VALUE_FIELD); mockHisto = PipelineAggregationHelperTests.generateHistogram(interval, numBuckets, randomDouble(), randomDouble()); - testValues = new HashMap<>(8); + testValues = Maps.newMapWithExpectedSize(8); for (MetricTarget target : MetricTarget.values()) { setupExpected(target); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index 4ff5ee9386426..099a2791b914b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index a8c2e8ac1eaf8..1a06e1378e028 100644 ---
a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index f700d811294ff..b7df9965ad29d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -24,9 +24,9 @@ import java.io.IOException; -import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.client.Requests.refreshRequest; -import static org.elasticsearch.client.Requests.searchRequest; +import static org.elasticsearch.client.internal.Requests.clusterHealthRequest; +import static org.elasticsearch.client.internal.Requests.refreshRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index 02b70ec19399e..405912c20fa79 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -36,8 +36,8 @@ import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; -import static org.elasticsearch.client.Requests.createIndexRequest; -import static org.elasticsearch.client.Requests.searchRequest; +import static 
org.elasticsearch.client.internal.Requests.createIndexRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index bb25fbe01771f..543c71ec234e5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -15,7 +15,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -210,10 +210,10 @@ public void testCancel() throws Exception { .get() .getTasks() .stream() - .filter(t -> t.getParentTaskId().isSet() == false) + .filter(t -> t.parentTaskId().isSet() == false) .findFirst() .get(); - final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTaskId(rootTask.getTaskId()); + final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.taskId()); cancelRequest.setWaitForCompletion(randomBoolean()); final ActionFuture cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); assertBusy(() -> { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java index 4feadb1916c04..8868b87d7420e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index 456f0d57c49d4..4b19abf28a5f2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -38,7 +38,7 @@ import java.util.Objects; import static java.util.Collections.singletonList; -import static org.elasticsearch.client.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; 
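Most of the remaining hunks in these search ITs are the same mechanical rewrite: the node-level Client and Requests helpers moved from org.elasticsearch.client to org.elasticsearch.client.internal, marking them as internal server-side API rather than public client surface. A representative before/after (illustrative; the affected imports are exactly the ones shown in the hunks):

import org.elasticsearch.client.internal.Client;   // was: org.elasticsearch.client.Client
import org.elasticsearch.client.internal.Requests; // was: org.elasticsearch.client.Requests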
import static org.hamcrest.CoreMatchers.equalTo; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 0f4643c305fac..c0a3650ee1846 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; @@ -60,14 +61,13 @@ import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.elasticsearch.client.Requests.searchRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.boostingQuery; import static org.elasticsearch.index.query.QueryBuilders.combinedFieldsQuery; @@ -2978,7 +2978,7 @@ public void testPostingsHighlighterManyDocs() throws Exception { ensureGreen(); int COUNT = between(20, 100); - Map<String, String> prefixes = new HashMap<>(COUNT); + Map<String, String> prefixes = Maps.newMapWithExpectedSize(COUNT); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT]; for (int i = 0; i < COUNT; i++) { @@ -3074,16 +3074,16 @@ public void testFastVectorHighlighterPhraseBoost() throws Exception { */ private void phraseBoostTestCase(String highlighterType) { ensureGreen(); - StringBuilder text = new StringBuilder(); - text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n"); - for (int i = 0; i < 10; i++) { - text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n"); - } - text.append("highlight words together\n"); - for (int i = 0; i < 10; i++) { - text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n"); - } - indexDoc("test", "1", "field1", text.toString()); + String text = """ + words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk + """ + """ + junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk + """.repeat(10) + """ + highlight words together + """ + """ + junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk + """.repeat(10); + indexDoc("test", "1", "field1", text); refresh(); // Match queries diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java index 7080f2510e732..5b08ddf95cf5f 100644 ---
a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.fieldcaps.FieldCapabilitiesIT.ExceptionOnRewriteQueryBuilder; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index d53034d56f69d..bedee045b6816 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -56,7 +56,7 @@ import static java.util.Collections.singleton; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.client.Requests.refreshRequest; +import static org.elasticsearch.client.internal.Requests.refreshRequest; import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index eefe39e41f130..c4707a3b7b6d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -38,8 +38,8 @@ import java.util.Locale; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.searchRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -436,7 +436,7 @@ public void testParseGeoPoint() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); - FunctionScoreQueryBuilder baseQuery = functionScoreQuery( + FunctionScoreQueryBuilder baseQueryBuilder = functionScoreQuery( constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); @@ -445,7 +445,7 @@ public void testParseGeoPoint() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", point, "1000km")).boostMode(CombineFunction.REPLACE) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode(CombineFunction.REPLACE) ) ) ); @@ -460,7 +460,7 @@ public void testParseGeoPoint() throws Exception { 
searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", coords, "1000km")).boostMode(CombineFunction.REPLACE) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode(CombineFunction.REPLACE) ) ) ); @@ -495,7 +495,7 @@ public void testCombineModes() throws Exception { .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()) .get(); - FunctionScoreQueryBuilder baseQuery = functionScoreQuery( + FunctionScoreQueryBuilder baseQueryBuilder = functionScoreQuery( constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(2) ); @@ -504,7 +504,9 @@ public void testCombineModes() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MULTIPLY) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MULTIPLY + ) ) ) ); @@ -518,7 +520,9 @@ public void testCombineModes() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.REPLACE) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.REPLACE + ) ) ) ); @@ -532,7 +536,7 @@ public void testCombineModes() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM) ) ) ); @@ -547,7 +551,7 @@ public void testCombineModes() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.AVG) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.AVG) ) ) ); @@ -561,7 +565,7 @@ public void testCombineModes() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MIN) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MIN) ) ) ); @@ -575,7 +579,7 @@ public void testCombineModes() throws Exception { searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MAX) + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MAX) ) ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 987ad8fbc30e0..7cac3aba645ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -44,7 +44,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.client.Requests.searchRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index 49ed7cb5fac5c..98f2e78031126 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -33,7 +33,7 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; -import static org.elasticsearch.client.Requests.searchRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index a2cfee27317ab..a33b45d88449b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -33,8 +33,8 @@ import java.util.List; import static java.util.Collections.singletonList; -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.searchRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.searchRequest; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 8ccad5150d5d8..7f8a6aa83f793 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -32,8 +32,8 @@ import java.util.List; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.refreshRequest; +import static org.elasticsearch.client.internal.Requests.indexRequest; +import static org.elasticsearch.client.internal.Requests.refreshRequest; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static 
org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java new file mode 100644 index 0000000000000..45b460fc8cc69 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.nested; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.functionscore.ScriptScoreQueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +public class NestedWithMinScoreIT extends ESIntegTestCase { + + public static class ScriptTestPlugin extends MockScriptPlugin { + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Map.of("score_script", params -> { + final Object scoreAccessor = params.get("_score"); + if (scoreAccessor instanceof Number) { + return ((Number) scoreAccessor).doubleValue(); + } else { + return null; + } + }); + } + } + + @Override + protected Collection<Class<? extends Plugin>> getMockPlugins() { + final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.getMockPlugins()); + plugins.add(ScriptTestPlugin.class); + return plugins; + } + + public void testNestedWithMinScore() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder(); + mapping.startObject(); + mapping.startObject("properties"); + { + mapping.startObject("toolTracks"); + { + mapping.field("type", "nested"); + mapping.startObject("properties"); + { + mapping.startObject("data"); + mapping.field("type", "text"); + mapping.endObject(); + + mapping.startObject("confidence"); + mapping.field("type", "double"); + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + mapping.endObject(); + + client().admin().indices().prepareCreate("test").setMapping(mapping).get(); + + XContentBuilder doc = XContentFactory.jsonBuilder(); + doc.startObject(); + doc.startArray("toolTracks"); + double[] confidence = new
double[] { 0.3, 0.92, 0.7, 0.85, 0.2, 0.3, 0.75, 0.82, 0.1, 0.6, 0.3, 0.7 }; + for (double v : confidence) { + doc.startObject(); + doc.field("confidence", v); + doc.field("data", "cash dispenser, automated teller machine, automatic teller machine"); + doc.endObject(); + } + doc.endArray(); + doc.endObject(); + + client().prepareIndex("test").setId("d1").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource(doc).get(); + final BoolQueryBuilder childQuery = new BoolQueryBuilder().filter( + new MatchPhraseQueryBuilder("toolTracks.data", "cash dispenser, automated teller machine, automatic teller machine") + ).filter(new RangeQueryBuilder("toolTracks.confidence").from(0.8)); + + final ScriptScoreQueryBuilder scriptScoreQuery = new ScriptScoreQueryBuilder( + new NestedQueryBuilder("toolTracks", new ConstantScoreQueryBuilder(childQuery), ScoreMode.Total), + new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "score_script", Map.of()) + ); + scriptScoreQuery.setMinScore(1.0f); + SearchSourceBuilder source = new SearchSourceBuilder(); + source.query(scriptScoreQuery); + source.profile(randomBoolean()); + if (randomBoolean()) { + source.trackTotalHitsUpTo(randomBoolean() ? Integer.MAX_VALUE : randomIntBetween(1, 1000)); + } + SearchRequest searchRequest = new SearchRequest("test").source(source); + final SearchResponse searchResponse = client().search(searchRequest).actionGet(); + ElasticsearchAssertions.assertSearchHits(searchResponse, "d1"); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 6c7e1fa430d78..b5c91b7f19b52 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -688,141 +688,128 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { } public void testNestedSortWithMultiLevelFiltering() throws Exception { - assertAcked( - prepareCreate("test").setMapping( - "{\n" - + " \"properties\": {\n" - + " \"acl\": {\n" - + " \"type\": \"nested\",\n" - + " \"properties\": {\n" - + " \"access_id\": {\"type\": \"keyword\"},\n" - + " \"operation\": {\n" - + " \"type\": \"nested\",\n" - + " \"properties\": {\n" - + " \"name\": {\"type\": \"keyword\"},\n" - + " \"user\": {\n" - + " \"type\": \"nested\",\n" - + " \"properties\": {\n" - + " \"username\": {\"type\": \"keyword\"},\n" - + " \"id\": {\"type\": \"integer\"}\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + "}" - ) - ); + assertAcked(prepareCreate("test").setMapping(""" + { + "properties": { + "acl": { + "type": "nested", + "properties": { + "access_id": {"type": "keyword"}, + "operation": { + "type": "nested", + "properties": { + "name": {"type": "keyword"}, + "user": { + "type": "nested", + "properties": { + "username": {"type": "keyword"}, + "id": {"type": "integer"} + } + } + } + } + } + } + } + }""")); ensureGreen(); - client().prepareIndex("test") - .setId("1") - .setSource( - "{\n" - + " \"acl\": [\n" - + " {\n" - + " \"access_id\": 1,\n" - + " \"operation\": [\n" - + " {\n" - + " \"name\": \"read\",\n" - + " \"user\": [\n" - + " {\"username\": \"matt\", \"id\": 1},\n" - + " {\"username\": \"shay\", \"id\": 2},\n" - + " {\"username\": \"adrien\", \"id\": 3}\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"name\": \"write\",\n" - + " \"user\": [\n" - + " {\"username\": 
\"shay\", \"id\": 2},\n" - + " {\"username\": \"adrien\", \"id\": 3}\n" - + " ]\n" - + " }\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"access_id\": 2,\n" - + " \"operation\": [\n" - + " {\n" - + " \"name\": \"read\",\n" - + " \"user\": [\n" - + " {\"username\": \"jim\", \"id\": 4},\n" - + " {\"username\": \"shay\", \"id\": 2}\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"name\": \"write\",\n" - + " \"user\": [\n" - + " {\"username\": \"shay\", \"id\": 2}\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"name\": \"execute\",\n" - + " \"user\": [\n" - + " {\"username\": \"shay\", \"id\": 2}\n" - + " ]\n" - + " }\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}", - XContentType.JSON - ) - .get(); - - client().prepareIndex("test") - .setId("2") - .setSource( - "{\n" - + " \"acl\": [\n" - + " {\n" - + " \"access_id\": 1,\n" - + " \"operation\": [\n" - + " {\n" - + " \"name\": \"read\",\n" - + " \"user\": [\n" - + " {\"username\": \"matt\", \"id\": 1},\n" - + " {\"username\": \"luca\", \"id\": 5}\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"name\": \"execute\",\n" - + " \"user\": [\n" - + " {\"username\": \"luca\", \"id\": 5}\n" - + " ]\n" - + " }\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"access_id\": 3,\n" - + " \"operation\": [\n" - + " {\n" - + " \"name\": \"read\",\n" - + " \"user\": [\n" - + " {\"username\": \"matt\", \"id\": 1}\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"name\": \"write\",\n" - + " \"user\": [\n" - + " {\"username\": \"matt\", \"id\": 1}\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"name\": \"execute\",\n" - + " \"user\": [\n" - + " {\"username\": \"matt\", \"id\": 1}\n" - + " ]\n" - + " }\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}", - XContentType.JSON - ) - .get(); + client().prepareIndex("test").setId("1").setSource(""" + { + "acl": [ + { + "access_id": 1, + "operation": [ + { + "name": "read", + "user": [ + {"username": "matt", "id": 1}, + {"username": "shay", "id": 2}, + {"username": "adrien", "id": 3} + ] + }, + { + "name": "write", + "user": [ + {"username": "shay", "id": 2}, + {"username": "adrien", "id": 3} + ] + } + ] + }, + { + "access_id": 2, + "operation": [ + { + "name": "read", + "user": [ + {"username": "jim", "id": 4}, + {"username": "shay", "id": 2} + ] + }, + { + "name": "write", + "user": [ + {"username": "shay", "id": 2} + ] + }, + { + "name": "execute", + "user": [ + {"username": "shay", "id": 2} + ] + } + ] + } + ] + }""", XContentType.JSON).get(); + + client().prepareIndex("test").setId("2").setSource(""" + { + "acl": [ + { + "access_id": 1, + "operation": [ + { + "name": "read", + "user": [ + {"username": "matt", "id": 1}, + {"username": "luca", "id": 5} + ] + }, + { + "name": "execute", + "user": [ + {"username": "luca", "id": 5} + ] + } + ] + }, + { + "access_id": 3, + "operation": [ + { + "name": "read", + "user": [ + {"username": "matt", "id": 1} + ] + }, + { + "name": "write", + "user": [ + {"username": "matt", "id": 1} + ] + }, + { + "name": "execute", + "user": [ + {"username": "matt", "id": 1} + ] + } + ] + } + ] + }""", XContentType.JSON).get(); refresh(); // access id = 1, read, max value, asc, should use matt and shay @@ -929,71 +916,60 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { - assertAcked( - prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)) - .setMapping( - "{\"_doc\":{\n" - + " \"dynamic\": \"strict\",\n" - + " \"properties\": {\n" - + " \"nested1\": {\n" - + " 
\"type\": \"nested\",\n" - + " \"properties\": {\n" - + " \"nested2\": {\n" - + " \"type\": \"nested\",\n" - + " \"properties\": {\n" - + " \"nested2_keyword\": {\n" - + " \"type\": \"keyword\"\n" - + " },\n" - + " \"sortVal\": {\n" - + " \"type\": \"integer\"\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + " }}\n" - ) - ); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)).setMapping(""" + { + "_doc": { + "dynamic": "strict", + "properties": { + "nested1": { + "type": "nested", + "properties": { + "nested2": { + "type": "nested", + "properties": { + "nested2_keyword": { + "type": "keyword" + }, + "sortVal": { + "type": "integer" + } + } + } + } + } + } + } + } + """)); ensureGreen(); - client().prepareIndex("test") - .setId("1") - .setSource( - "{\n" - + " \"nested1\": [\n" - + " {\n" - + " \"nested2\": [\n" - + " {\n" - + " \"nested2_keyword\": \"nested2_bar\",\n" - + " \"sortVal\": 1\n" - + " }\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}", - XContentType.JSON - ) - .get(); - - client().prepareIndex("test") - .setId("2") - .setSource( - "{\n" - + " \"nested1\": [\n" - + " {\n" - + " \"nested2\": [\n" - + " {\n" - + " \"nested2_keyword\": \"nested2_bar\",\n" - + " \"sortVal\": 2\n" - + " }\n" - + " ]\n" - + " } \n" - + " ]\n" - + "}", - XContentType.JSON - ) - .get(); + client().prepareIndex("test").setId("1").setSource(""" + { + "nested1": [ + { + "nested2": [ + { + "nested2_keyword": "nested2_bar", + "sortVal": 1 + } + ] + } + ] + }""", XContentType.JSON).get(); + + client().prepareIndex("test").setId("2").setSource(""" + { + "nested1": [ + { + "nested2": [ + { + "nested2_keyword": "nested2_bar", + "sortVal": 2 + } + ] + } + ] + }""", XContentType.JSON).get(); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index a1a6041c340ac..78ce52ddc8070 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.search.profile.aggregation; -import io.github.nik9000.mapmatcher.MapMatcher; - import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; @@ -25,6 +23,7 @@ import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MapMatcher; import java.io.IOException; import java.time.Instant; @@ -34,14 +33,14 @@ import java.util.Set; import java.util.stream.Collectors; -import static io.github.nik9000.mapmatcher.ListMatcher.matchesList; -import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; -import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.diversifiedSampler; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static 
org.elasticsearch.test.ListMatcher.matchesList; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index af70787278b85..2090c4909870a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -12,17 +12,12 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.Operator; -import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; @@ -32,23 +27,14 @@ import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class QueryStringIT extends ESIntegTestCase { - private static int CLUSTER_MAX_CLAUSE_COUNT; - - @BeforeClass - public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); - } - @Before public void setup() throws Exception { String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); @@ -56,14 +42,6 @@ public void setup() throws Exception { ensureGreen("test"); } - @Override - protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal, otherSettings)) - .put(SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT) - .build(); - } - public void testBasicAllQuery() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); @@ -250,103 +228,6 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { assertThat(e.getCause().getMessage(), containsString("unit [D] not supported for date math [-2D]")); } - // The only expectation for this test is to not throw exception - public void testLimitOnExpandedFieldsButIgnoreUnmappedFields() throws Exception { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); -
builder.startObject("_doc"); - builder.startObject("properties"); - for (int i = 0; i < CLUSTER_MAX_CLAUSE_COUNT; i++) { - builder.startObject("field" + i).field("type", "text").endObject(); - } - builder.endObject(); // properties - builder.endObject(); // type1 - builder.endObject(); - - assertAcked(prepareCreate("ignoreunmappedfields").setMapping(builder)); - - client().prepareIndex("ignoreunmappedfields").setId("1").setSource("field1", "foo bar baz").get(); - refresh(); - - QueryStringQueryBuilder qb = queryStringQuery("bar"); - if (randomBoolean()) { - qb.field("*").field("unmappedField1").field("unmappedField2").field("unmappedField3").field("unmappedField4"); - } - client().prepareSearch("ignoreunmappedfields").setQuery(qb).get(); - } - - public void testLimitOnExpandedFields() throws Exception { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - { - builder.startObject("_doc"); - { - builder.startObject("properties"); - { - for (int i = 0; i < CLUSTER_MAX_CLAUSE_COUNT; i++) { - builder.startObject("field_A" + i).field("type", "text").endObject(); - builder.startObject("field_B" + i).field("type", "text").endObject(); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endObject(); - } - - assertAcked( - prepareCreate("testindex").setSettings( - Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT + 100) - ).setMapping(builder) - ); - - client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get(); - refresh(); - - // single field shouldn't trigger the limit - doAssertOneHitForQueryString("field_A0:foo"); - // expanding to the limit should work - doAssertOneHitForQueryString("field_A\\*:foo"); - - // adding a non-existing field on top shouldn't overshoot the limit - doAssertOneHitForQueryString("field_A\\*:foo unmapped:something"); - - // the following should exceed the limit - doAssertLimitExceededException("foo", CLUSTER_MAX_CLAUSE_COUNT * 2, "*"); - doAssertLimitExceededException("*:foo", CLUSTER_MAX_CLAUSE_COUNT * 2, "*"); - doAssertLimitExceededException("field_\\*:foo", CLUSTER_MAX_CLAUSE_COUNT * 2, "field_*"); - } - - private void doAssertOneHitForQueryString(String queryString) { - QueryStringQueryBuilder qb = queryStringQuery(queryString); - if (randomBoolean()) { - qb.defaultField("*"); - } - SearchResponse response = client().prepareSearch("testindex").setQuery(qb).get(); - assertHitCount(response, 1); - } - - private void doAssertLimitExceededException(String queryString, int exceedingFieldCount, String inputFieldPattern) { - Exception e = expectThrows(Exception.class, () -> { - QueryStringQueryBuilder qb = queryStringQuery(queryString); - if (randomBoolean()) { - qb.defaultField("*"); - } - client().prepareSearch("testindex").setQuery(qb).get(); - }); - assertThat( - ExceptionsHelper.unwrap(e, IllegalArgumentException.class).getMessage(), - containsString( - "field expansion for [" - + inputFieldPattern - + "] matches too many fields, limit: " - + CLUSTER_MAX_CLAUSE_COUNT - + ", got: " - + exceedingFieldCount - ) - ); - } - public void testFieldAlias() throws Exception { List indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index 45819c407b74b..e2dba7ad1487c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -1366,17 +1366,29 @@ public void testIntervals() throws InterruptedException { client().prepareIndex("test").setId("1").setSource("description", "it's cold outside, there's no kind of atmosphere") ); - String json = "{ \"intervals\" : " - + "{ \"description\": { " - + " \"all_of\" : {" - + " \"ordered\" : \"true\"," - + " \"intervals\" : [" - + " { \"any_of\" : {" - + " \"intervals\" : [" - + " { \"match\" : { \"query\" : \"cold\" } }," - + " { \"match\" : { \"query\" : \"outside\" } } ] } }," - + " { \"match\" : { \"query\" : \"atmosphere\" } } ]," - + " \"max_gaps\" : 30 } } } }"; + String json = """ + { + "intervals": { + "description": { + "all_of": { + "ordered": "true", + "intervals": [ + { + "any_of": { + "intervals": [ { "match": { "query": "cold" } }, { "match": { "query": "outside" } } ] + } + }, + { + "match": { + "query": "atmosphere" + } + } + ], + "max_gaps": 30 + } + } + } + }"""; SearchResponse response = client().prepareSearch("test").setQuery(wrapperQuery(json)).get(); assertHitCount(response, 1L); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 0c746cedf301d..387123189217c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -11,7 +11,6 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -19,23 +18,18 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.SimpleQueryStringFlag; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; -import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; @@ -69,21 +63,6 @@ */ public class SimpleQueryStringIT extends ESIntegTestCase { - private static int CLUSTER_MAX_CLAUSE_COUNT; - - @BeforeClass - public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() - 
.put(super.nodeSettings(nodeOrdinal, otherSettings)) - .put(SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT) - .build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(MockAnalysisPlugin.class); @@ -565,11 +544,15 @@ public void testKeywordWithWhitespace() throws Exception { assertHitCount(resp, 2L); } - public void testAllFieldsWithSpecifiedLeniency() throws IOException { + public void testAllFieldsWithSpecifiedLeniency() throws Exception { String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); ensureGreen("test"); + List<IndexRequestBuilder> reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test").setId("1").setSource("f_long", 1)); + indexRandom(true, false, reqs); + SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, () -> client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo123").lenient(false)).get() @@ -577,43 +560,6 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { assertThat(e.getDetailedMessage(), containsString("NumberFormatException: For input string: \"foo123\"")); } - public void testLimitOnExpandedFields() throws Exception { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - builder.startObject("_doc"); - builder.startObject("properties"); - for (int i = 0; i < CLUSTER_MAX_CLAUSE_COUNT + 1; i++) { - builder.startObject("field" + i).field("type", "text").endObject(); - } - builder.endObject(); // properties - builder.endObject(); // type1 - builder.endObject(); - - assertAcked( - prepareCreate("toomanyfields").setSettings( - Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT + 100) - ).setMapping(builder) - ); - - client().prepareIndex("toomanyfields").setId("1").setSource("field1", "foo bar baz").get(); - refresh(); - - doAssertLimitExceededException("*", CLUSTER_MAX_CLAUSE_COUNT + 1); - doAssertLimitExceededException("field*", CLUSTER_MAX_CLAUSE_COUNT + 1); - } - - private void doAssertLimitExceededException(String field, int exceedingFieldCount) { - Exception e = expectThrows(Exception.class, () -> { - QueryStringQueryBuilder qb = queryStringQuery("bar"); - qb.field(field); - client().prepareSearch("toomanyfields").setQuery(qb).get(); - }); - assertThat( - ExceptionsHelper.unwrap(e, IllegalArgumentException.class).getMessage(), - containsString("field expansion matches too many fields, limit: " + CLUSTER_MAX_CLAUSE_COUNT + ", got: " + exceedingFieldCount) - ); - } - public void testFieldAlias() throws Exception { String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); assertAcked(prepareCreate("test").setSource(indexBody, XContentType.JSON)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 03a431f792424..ea3ae8a38d263 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import
org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.OperationRouting; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java index 699fa9c05a0ba..37f93ff25bfa2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.common.collect.ImmutableOpenMap; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 450d97710611a..9f0fb7c01ca6e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -688,7 +688,9 @@ public void testScrollRewrittenToMatchNoDocs() { .indices() .prepareCreate("test") .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)) - .setMapping("{\"properties\":{\"created_date\":{\"type\": \"date\", \"format\": \"yyyy-MM-dd\"}}}") + .setMapping(""" + {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}} + """) ); client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index 76dc3a961bfb4..85de5cc1527a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -185,27 +185,13 @@ public void testWithSimpleTypes() throws Exception { List<Object> values = new ArrayList<>(); for (int type : types) { switch (type) { - case 0: - values.add(randomBoolean()); - break; - case 1: - values.add(randomByte()); - break; - case 2: - values.add(randomShort()); - break; - case 3: - values.add(randomInt()); - break; - case 4: - values.add(randomFloat()); - break; - case 5: - values.add(randomDouble()); - break; - case 6: - values.add(randomAlphaOfLengthBetween(5, 20)); - break; + case 0 -> values.add(randomBoolean()); + case 1 -> values.add(randomByte()); + case 2 -> values.add(randomShort()); + case 3 -> values.add(randomInt()); + case 4 -> values.add(randomFloat()); + case 5 -> values.add(randomDouble()); + case 6 -> values.add(randomAlphaOfLengthBetween(5, 20)); } } values.add(UUIDs.randomBase64UUID()); @@ -411,14 +397,13 @@ private List<Object> convertSortValues(List<Object> sortValues) {
List<Object> converted = new ArrayList<>(); for (int i = 0; i < sortValues.size(); i++) { Object from = sortValues.get(i); - if (from instanceof Integer) { - converted.add(((Integer) from).longValue()); - } else if (from instanceof Short) { - converted.add(((Short) from).longValue()); - } else if (from instanceof Byte) { - converted.add(((Byte) from).longValue()); - } else if (from instanceof Boolean) { - boolean b = (boolean) from; + if (from instanceof Integer integer) { + converted.add(integer.longValue()); + } else if (from instanceof Short s) { + converted.add(s.longValue()); + } else if (from instanceof Byte b) { + converted.add(b.longValue()); + } else if (from instanceof Boolean b) { if (b) { converted.add(1L); } else { @@ -446,13 +431,8 @@ public void testScrollAndSearchAfterWithBigIndex() { if (randomBoolean()) { indexSettings.put("sort.field", "timestamp").put("sort.order", randomFrom("desc", "asc")); } - assertAcked( - client().admin() - .indices() - .prepareCreate("test") - .setSettings(indexSettings) - .setMapping("{\"properties\":{\"timestamp\":{\"type\": \"date\", \"format\": \"epoch_millis\"}}}") - ); + assertAcked(client().admin().indices().prepareCreate("test").setSettings(indexSettings).setMapping(""" + {"properties":{"timestamp":{"type": "date", "format": "epoch_millis"}}}""")); Randomness.shuffle(timestamps); final BulkRequestBuilder bulk = client().prepareBulk(); bulk.setRefreshPolicy(IMMEDIATE); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 71f246539cc3d..2659819050996 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.simple; +import org.apache.lucene.util.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -343,8 +344,8 @@ public void testSimpleTerminateAfterCount() throws Exception { assertFalse(searchResponse.isTerminatedEarly()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/78080") public void testSimpleIndexSortEarlyTerminate() throws Exception { + skipTestWaitingForLuceneFix(Version.fromBits(9, 1, 0), "LUCENE-10377"); prepareCreate("test").setSettings( Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") ).setMapping("rank", "type=integer").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index fe203347f9efe..afc5eb4a3304f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -1053,11 +1053,11 @@ public void testThatSuggestStopFilterWorks() throws Exception { .put("index.analysis.filter.suggest_stop_filter.type", "stop") .put("index.analysis.filter.suggest_stop_filter.remove_trailing", false); - CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); -
completionMappingBuilder.preserveSeparators(true).preservePositionIncrements(true); - completionMappingBuilder.searchAnalyzer("stoptest"); - completionMappingBuilder.indexAnalyzer("simple"); - createIndexAndMappingAndSettings(settingsBuilder.build(), completionMappingBuilder); + CompletionMappingBuilder builder = new CompletionMappingBuilder(); + builder.preserveSeparators(true).preservePositionIncrements(true); + builder.searchAnalyzer("stoptest"); + builder.indexAnalyzer("simple"); + createIndexAndMappingAndSettings(settingsBuilder.build(), builder); client().prepareIndex(INDEX) .setId("1") @@ -1103,8 +1103,8 @@ public void testThatSuggestStopFilterWorks() throws Exception { } public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() throws Exception { - CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); - createIndexAndMapping(completionMappingBuilder); + CompletionMappingBuilder builder = new CompletionMappingBuilder(); + createIndexAndMapping(builder); try { client().prepareIndex(INDEX) @@ -1262,7 +1262,7 @@ private static List<String> getNames(Suggest.Suggestion.Entry - for (Map.Entry<String, ContextMapping<?>> contextMapping : completionMappingBuilder.contextMappings.entrySet()) { + for (Map.Entry<String, ContextMapping<?>> contextMapping : builder.contextMappings.entrySet()) { mapping = mapping.startObject() .field("name", contextMapping.getValue().name()) .field("type", contextMapping.getValue().type().name()); - switch (contextMapping.getValue().type()) { - case CATEGORY: - mapping = mapping.field("path", ((CategoryContextMapping) contextMapping.getValue()).getFieldName()); - break; - case GEO: - mapping = mapping.field("path", ((GeoContextMapping) contextMapping.getValue()).getFieldName()) - .field("precision", ((GeoContextMapping) contextMapping.getValue()).getPrecision()); - break; - } + mapping = switch (contextMapping.getValue().type()) { + case CATEGORY -> mapping.field("path", ((CategoryContextMapping) contextMapping.getValue()).getFieldName()); + case GEO -> mapping.field("path", ((GeoContextMapping) contextMapping.getValue()).getFieldName()) + .field("precision", ((GeoContextMapping) contextMapping.getValue()).getPrecision()); + }; mapping = mapping.endObject(); } @@ -1312,8 +1308,8 @@ private void createIndexAndMappingAndSettings(Settings settings, CompletionMappi ); } - private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException { - createIndexAndMappingAndSettings(Settings.EMPTY, completionMappingBuilder); + private void createIndexAndMapping(CompletionMappingBuilder builder) throws IOException { + createIndexAndMappingAndSettings(Settings.EMPTY, builder); } // see #3555 @@ -1343,7 +1339,7 @@ public void testPrunedSegments() throws IOException { assertSuggestions("b"); assertThat(2L, equalTo(client().prepareSearch(INDEX).setSize(0).get().getHits().getTotalHits().value)); for (IndexShardSegments seg : client().admin().indices().prepareSegments().get().getIndices().get(INDEX)) { - ShardSegments[] shards = seg.getShards(); + ShardSegments[] shards = seg.shards(); for (ShardSegments shardSegments : shards) { assertThat(shardSegments.getSegments().size(), equalTo(1)); } @@ -1638,8 +1634,8 @@ public CompletionMappingBuilder preservePositionIncrements(Boolean preservePosit return this; } - public CompletionMappingBuilder context(LinkedHashMap<String, ContextMapping<?>> contextMappings) { - this.contextMappings = contextMappings; + public CompletionMappingBuilder context(LinkedHashMap<String, ContextMapping<?>> mappings) { + this.contextMappings = mappings; return this; } } diff --git
a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 1e3e12c3f507d..7944bdb595681 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -700,20 +700,20 @@ private void createIndexAndMappingAndSettings(Settings settings, CompletionMappi .field("name", contextMapping.getValue().name()) .field("type", contextMapping.getValue().type().name()); switch (contextMapping.getValue().type()) { - case CATEGORY: + case CATEGORY -> { final String fieldName = ((CategoryContextMapping) contextMapping.getValue()).getFieldName(); if (fieldName != null) { mapping.field("path", fieldName); categoryContextFields.add(fieldName); } - break; - case GEO: + } + case GEO -> { final String name = ((GeoContextMapping) contextMapping.getValue()).getFieldName(); mapping.field("precision", ((GeoContextMapping) contextMapping.getValue()).getPrecision()); if (name != null) { mapping.field("path", name); } - break; + } } mapping.endObject(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index 38d15d91d8802..4c278aeddb5bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index f519e5b863c28..28f797dfa5ec6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index af3c142935450..f69cc0daa0ac8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -644,8 +644,8 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { 
networkDisruption.stopDisrupting(); logger.info("--> make sure all failing requests get a response"); - expectThrows(RepositoryException.class, firstDeleteFuture::actionGet); - expectThrows(RepositoryException.class, secondDeleteFuture::actionGet); + assertAcked(firstDeleteFuture.get()); + assertAcked(secondDeleteFuture.get()); expectThrows(SnapshotException.class, createThirdSnapshot::actionGet); awaitNoMoreRunningOperations(); @@ -675,7 +675,7 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except ); awaitNumberOfSnapshotsInProgress(2); - final ActionFuture<AcknowledgedResponse> failedDeleteFuture = client(masterNode).admin() + final ActionFuture<AcknowledgedResponse> deleteFuture = client(masterNode).admin() .cluster() .prepareDeleteSnapshot(repoName, "*") .execute(); @@ -689,7 +689,7 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except logger.info("--> make sure all failing requests get a response"); expectThrows(SnapshotException.class, firstFailedSnapshotFuture::actionGet); expectThrows(SnapshotException.class, secondFailedSnapshotFuture::actionGet); - expectThrows(RepositoryException.class, failedDeleteFuture::actionGet); + assertAcked(deleteFuture.get()); awaitNoMoreRunningOperations(); } @@ -1037,7 +1037,7 @@ public void testMasterFailoverOnFinalizationLoop() throws Exception { final List<String> snapshotNames = createNSnapshots(repoName, randomIntBetween(2, 5)); final String masterName = internalCluster().getMasterName(); blockMasterFromDeletingIndexNFile(repoName); - final ActionFuture<CreateSnapshotResponse> snapshotThree = startFullSnapshotFromMasterClient(repoName, "snap-other"); + final ActionFuture<CreateSnapshotResponse> snapshotOther = startFullSnapshotFromMasterClient(repoName, "snap-other"); waitForBlock(masterName, repoName); final String snapshotOne = snapshotNames.get(0); @@ -1050,15 +1050,70 @@ public void testMasterFailoverOnFinalizationLoop() throws Exception { networkDisruption.stopDisrupting(); ensureStableCluster(4); - assertSuccessful(snapshotThree); + assertSuccessful(snapshotOther); try { deleteSnapshotOne.actionGet(); } catch (RepositoryException re) { // ignored + } catch (SnapshotMissingException re) { + // When the master node is isolated during this test, the newly elected master takes over and executes the snapshot deletion.
In + // this case the retried delete snapshot operation on the new master can fail with SnapshotMissingException } awaitNoMoreRunningOperations(); } + public void testMasterFailoverDuringStaleIndicesCleanup() throws Exception { + internalCluster().startMasterOnlyNodes(3); + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "mock"); + createFullSnapshot(repoName, "empty-snapshot"); + // use a few more shards to make the master take a little longer to clean up the stale index and simulate more concurrency between + // snapshot create and delete below + createIndexWithContent("index-test", indexSettingsNoReplicas(randomIntBetween(6, 10)).build()); + final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(networkDisruption); + + final List<String> fullSnapshotsToDelete = createNSnapshots(repoName, randomIntBetween(1, 5)); + final String masterName = internalCluster().getMasterName(); + blockMasterOnAnyDataFile(repoName); + final ActionFuture<AcknowledgedResponse> deleteAllSnapshotsWithIndex = startDeleteSnapshots( + repoName, + fullSnapshotsToDelete, + masterName + ); + + // wait for the delete to show up in the CS so that the below snapshot is queued after it for sure + awaitNDeletionsInProgress(1); + final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshotFromDataNode(repoName, "new-full-snapshot"); + waitForBlock(masterName, repoName); + awaitNumberOfSnapshotsInProgress(1); + networkDisruption.startDisrupting(); + ensureStableCluster(3, dataNode); + // wait for the snapshot to finish while the isolated master is stuck on deleting a data blob + try { + snapshotFuture.get(); + } catch (Exception e) { + // ignore exceptions here, the snapshot will work out fine in all cases but the API might throw because of the master + // fail-over during the snapshot + // TODO: remove this leniency once we fix the API to handle master failover more cleanly + } + awaitNoMoreRunningOperations(dataNode); + + // now unblock the stale master and have it continue deleting blobs from the repository + unblockNode(repoName, masterName); + + networkDisruption.stopDisrupting(); + ensureStableCluster(4); + try { + deleteAllSnapshotsWithIndex.get(); + } catch (Exception ignored) { + // ignored as we had a failover in here and will get all kinds of errors as a result, just making sure the future completes in + // all cases for now + // TODO: remove this leniency once we fix the API to handle master failover more cleanly + } + } + public void testStatusMultipleSnapshotsMultipleRepos() throws Exception { internalCluster().startMasterOnlyNode(); // We're blocking some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads @@ -1972,6 +2027,16 @@ private ActionFuture<CreateSnapshotResponse> startFullSnapshotFromNonMasterClien .execute(); } + private ActionFuture<CreateSnapshotResponse> startFullSnapshotFromDataNode(String repoName, String snapshotName) { + logger.info("--> creating full snapshot [{}] to repo [{}] from data node client", snapshotName, repoName); + return internalCluster().dataNodeClient() .admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .execute(); + } + private ActionFuture<CreateSnapshotResponse> startFullSnapshotFromMasterClient(String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] to repo [{}] from master client", snapshotName, repoName); return internalCluster().masterClient() diff --git
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index d677043917bce..12f1475c449e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java new file mode 100644 index 0000000000000..266ef7d74f543 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -0,0 +1,259 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.snapshots; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.test.TestCustomMetadata; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class CustomMetadataContextIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(TestCustomMetadataPlugin.class); + } + + public void testShouldNotRestoreRepositoryMetadata() { + var repoPath = randomRepoPath(); + + logger.info("create repository"); + createRepository("test-repo-1", "fs", repoPath); + + logger.info("create snapshot"); + createFullSnapshot("test-repo-1", "test-snap"); + assertThat(getSnapshot("test-repo-1", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("delete repository"); + assertAcked(clusterAdmin().prepareDeleteRepository("test-repo-1")); + + 
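All of the custom metadata classes added later in this new file share one shape; the only knob the tests turn is the EnumSet returned from context(), which decides whether a custom survives snapshots, survives full-cluster restarts, or is only visible through the cluster state API. A condensed sketch of that shape, with an illustrative name (DemoMetadata and the "demo" type string are not part of the change):

import java.util.EnumSet;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.test.TestCustomMetadata;

class DemoMetadata extends TestCustomMetadata {
    static final String TYPE = "demo";

    DemoMetadata(String data) {
        super(data);
    }

    @Override
    public String getWriteableName() {
        return TYPE;
    }

    @Override
    public Version getMinimalSupportedVersion() {
        return Version.CURRENT;
    }

    @Override
    public EnumSet<Metadata.XContentContext> context() {
        // API_AND_SNAPSHOT: returned by the cluster state API and written into snapshots,
        // but not persisted to the gateway, so it does not survive a full cluster restart.
        return Metadata.API_AND_SNAPSHOT;
    }
}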
logger.info("create another repository"); + createRepository("test-repo-2", "fs", repoPath); + + logger.info("restore snapshot"); + clusterAdmin().prepareRestoreSnapshot("test-repo-2", "test-snap") + .setRestoreGlobalState(true) + .setIndices("-*") + .setWaitForCompletion(true) + .execute() + .actionGet(); + + logger.info("make sure old repository wasn't restored"); + assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo-1"), RepositoryMissingException.class); + assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); + } + + public void testShouldRestoreOnlySnapshotMetadata() throws Exception { + var repoPath = randomRepoPath(); + + logger.info("create repository"); + createRepository("test-repo", "fs", repoPath); + + logger.info("add custom persistent metadata"); + boolean isSnapshotMetadataSet = randomBoolean(); + updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { + if (isSnapshotMetadataSet) { + metadataBuilder.putCustom(SnapshotMetadata.TYPE, new SnapshotMetadata("before_snapshot_s")); + } + metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("before_snapshot_ns")); + })); + + logger.info("create snapshot"); + createFullSnapshot("test-repo", "test-snapshot"); + assertThat(getSnapshot("test-repo", "test-snapshot").state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("update custom persistent metadata"); + updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { + if (isSnapshotMetadataSet == false || randomBoolean()) { + metadataBuilder.putCustom(SnapshotMetadata.TYPE, new SnapshotMetadata("after_snapshot_s")); + } else { + metadataBuilder.removeCustom(SnapshotMetadata.TYPE); + } + metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("after_snapshot_ns")); + })); + + logger.info("restore snapshot"); + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snapshot") + .setRestoreGlobalState(true) + .setIndices("-*") + .setWaitForCompletion(true) + .execute() + .actionGet(); + + var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + logger.info("check that custom persistent metadata [{}] is correctly restored", metadata); + if (isSnapshotMetadataSet) { + assertThat(metadata.custom(SnapshotMetadata.TYPE).getData(), equalTo("before_snapshot_s")); + } else { + assertThat(metadata.custom(SnapshotMetadata.TYPE), nullValue()); + } + assertThat(metadata.custom(ApiMetadata.TYPE).getData(), equalTo("after_snapshot_ns")); + } + + public void testShouldKeepGatewayMetadataAfterRestart() throws Exception { + logger.info("add custom gateway metadata"); + updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { + metadataBuilder.putCustom(GatewayMetadata.TYPE, new GatewayMetadata("before_restart_s_gw")); + metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("before_restart_ns")); + })); + + logger.info("restart all nodes"); + internalCluster().fullRestart(); + ensureYellow(); + + var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + logger.info("check that gateway custom metadata [{}] survived full cluster restart", metadata); + assertThat(metadata.custom(GatewayMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); + assertThat(metadata.custom(ApiMetadata.TYPE), nullValue()); + } + + public void testShouldExposeApiMetadata() throws Exception { + logger.info("add custom api metadata"); + updateClusterState(currentState -> 
currentState.copyAndUpdateMetadata(metadataBuilder -> { + metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("before_restart_s_gw")); + metadataBuilder.putCustom(NonApiMetadata.TYPE, new NonApiMetadata("before_restart_ns")); + })); + + var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + logger.info("check that api custom metadata [{}] is visible via api", metadata); + assertThat(metadata.custom(ApiMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); + assertThat(metadata.custom(NonApiMetadata.TYPE), nullValue()); + } + + public static class TestCustomMetadataPlugin extends Plugin { + + private final List namedWritables = new ArrayList<>(); + private final List namedXContents = new ArrayList<>(); + + public TestCustomMetadataPlugin() { + registerBuiltinWritables(); + } + + private void registerMetadataCustom( + String name, + Writeable.Reader reader, + Writeable.Reader> diffReader, + CheckedFunction parser + ) { + namedWritables.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, name, reader)); + namedWritables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, name, diffReader)); + namedXContents.add(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(name), parser)); + } + + private void registerBuiltinWritables() { + Map.>of( + SnapshotMetadata.TYPE, + SnapshotMetadata::new, + GatewayMetadata.TYPE, + GatewayMetadata::new, + ApiMetadata.TYPE, + ApiMetadata::new, + NonApiMetadata.TYPE, + NonApiMetadata::new + ) + .forEach( + (type, constructor) -> registerMetadataCustom( + type, + in -> TestCustomMetadata.readFrom(constructor, in), + in -> TestCustomMetadata.readDiffFrom(type, in), + parser -> TestCustomMetadata.fromXContent(constructor, parser) + ) + ); + } + + @Override + public List getNamedWriteables() { + return namedWritables; + } + + @Override + public List getNamedXContent() { + return namedXContents; + } + } + + private abstract static class ThisTestCustomMetadata extends TestCustomMetadata { + private final String type; + private final EnumSet context; + + ThisTestCustomMetadata(String data, String type, EnumSet context) { + super(data); + this.type = type; + this.context = context; + } + + @Override + public String getWriteableName() { + return type; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + @Override + public EnumSet context() { + return context; + } + } + + private static class SnapshotMetadata extends ThisTestCustomMetadata { + public static final String TYPE = "test_metadata_scope_snapshot"; + + SnapshotMetadata(String data) { + super(data, TYPE, Metadata.API_AND_SNAPSHOT); + } + } + + private static class GatewayMetadata extends ThisTestCustomMetadata { + public static final String TYPE = "test_metadata_scope_gateway"; + + GatewayMetadata(String data) { + super(data, TYPE, Metadata.API_AND_GATEWAY); + } + } + + private static class ApiMetadata extends ThisTestCustomMetadata { + public static final String TYPE = "test_metadata_scope_api"; + + ApiMetadata(String data) { + super(data, TYPE, Metadata.API_ONLY); + } + } + + private static class NonApiMetadata extends ThisTestCustomMetadata { + public static final String TYPE = "test_metadata_scope_non_api"; + + NonApiMetadata(String data) { + super(data, TYPE, EnumSet.of(Metadata.XContentContext.GATEWAY, Metadata.XContentContext.SNAPSHOT)); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java deleted file mode 100644 index 471192343ebc8..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.snapshots; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.NamedDiff; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.RepositoryMissingException; -import org.elasticsearch.test.TestCustomMetadata; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; - -public class CustomMetadataSnapshotIT extends AbstractSnapshotIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Collections.singletonList(TestCustomMetadataPlugin.class); - } - - public void testRestoreCustomMetadata() throws Exception { - Path tempDir = randomRepoPath(); - - logger.info("--> start node"); - internalCluster().startNode(); - createIndex("test-idx"); - logger.info("--> add custom persistent metadata"); - updateClusterState(currentState -> { - ClusterState.Builder builder = ClusterState.builder(currentState); - Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); - metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("before_snapshot_s")); - metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("before_snapshot_ns")); - metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("before_snapshot_s_gw")); - metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("before_snapshot_ns_gw")); - metadataBuilder.putCustom( - SnapshotableGatewayNoApiMetadata.TYPE, - new SnapshotableGatewayNoApiMetadata("before_snapshot_s_gw_noapi") - ); - builder.metadata(metadataBuilder); - return builder.build(); - }); - - createRepository("test-repo", "fs", tempDir); - createFullSnapshot("test-repo", "test-snap"); - assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); - - logger.info("--> change custom persistent metadata"); - updateClusterState(currentState 
-> { - ClusterState.Builder builder = ClusterState.builder(currentState); - Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); - if (randomBoolean()) { - metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("after_snapshot_s")); - } else { - metadataBuilder.removeCustom(SnapshottableMetadata.TYPE); - } - metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("after_snapshot_ns")); - if (randomBoolean()) { - metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("after_snapshot_s_gw")); - } else { - metadataBuilder.removeCustom(SnapshottableGatewayMetadata.TYPE); - } - metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("after_snapshot_ns_gw")); - metadataBuilder.removeCustom(SnapshotableGatewayNoApiMetadata.TYPE); - builder.metadata(metadataBuilder); - return builder.build(); - }); - - logger.info("--> delete repository"); - assertAcked(clusterAdmin().prepareDeleteRepository("test-repo")); - - createRepository("test-repo-2", "fs", tempDir); - - logger.info("--> restore snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo-2", "test-snap") - .setRestoreGlobalState(true) - .setIndices("-*") - .setWaitForCompletion(true) - .execute() - .actionGet(); - - logger.info("--> make sure old repository wasn't restored"); - assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo"), RepositoryMissingException.class); - assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); - - logger.info("--> check that custom persistent metadata was restored"); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); - logger.info("Cluster state: {}", clusterState); - Metadata metadata = clusterState.getMetadata(); - assertThat(((SnapshottableMetadata) metadata.custom(SnapshottableMetadata.TYPE)).getData(), equalTo("before_snapshot_s")); - assertThat(((NonSnapshottableMetadata) metadata.custom(NonSnapshottableMetadata.TYPE)).getData(), equalTo("after_snapshot_ns")); - assertThat( - ((SnapshottableGatewayMetadata) metadata.custom(SnapshottableGatewayMetadata.TYPE)).getData(), - equalTo("before_snapshot_s_gw") - ); - assertThat( - ((NonSnapshottableGatewayMetadata) metadata.custom(NonSnapshottableGatewayMetadata.TYPE)).getData(), - equalTo("after_snapshot_ns_gw") - ); - - logger.info("--> restart all nodes"); - internalCluster().fullRestart(); - ensureYellow(); - - logger.info("--> check that gateway-persistent custom metadata survived full cluster restart"); - clusterState = clusterAdmin().prepareState().get().getState(); - logger.info("Cluster state: {}", clusterState); - metadata = clusterState.getMetadata(); - assertThat(metadata.custom(SnapshottableMetadata.TYPE), nullValue()); - assertThat(metadata.custom(NonSnapshottableMetadata.TYPE), nullValue()); - assertThat( - ((SnapshottableGatewayMetadata) metadata.custom(SnapshottableGatewayMetadata.TYPE)).getData(), - equalTo("before_snapshot_s_gw") - ); - assertThat( - ((NonSnapshottableGatewayMetadata) metadata.custom(NonSnapshottableGatewayMetadata.TYPE)).getData(), - equalTo("after_snapshot_ns_gw") - ); - // Shouldn't be returned as part of API response - assertThat(metadata.custom(SnapshotableGatewayNoApiMetadata.TYPE), nullValue()); - // But should still be in state - metadata = internalCluster().getInstance(ClusterService.class).state().metadata(); - assertThat( - 
((SnapshotableGatewayNoApiMetadata) metadata.custom(SnapshotableGatewayNoApiMetadata.TYPE)).getData(), - equalTo("before_snapshot_s_gw_noapi") - ); - } - - public static class TestCustomMetadataPlugin extends Plugin { - - private final List namedWritables = new ArrayList<>(); - private final List namedXContents = new ArrayList<>(); - - public TestCustomMetadataPlugin() { - registerBuiltinWritables(); - } - - private void registerMetadataCustom( - String name, - Writeable.Reader reader, - Writeable.Reader> diffReader, - CheckedFunction parser - ) { - namedWritables.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, name, reader)); - namedWritables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, name, diffReader)); - namedXContents.add(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(name), parser)); - } - - private void registerBuiltinWritables() { - registerMetadataCustom( - SnapshottableMetadata.TYPE, - SnapshottableMetadata::readFrom, - SnapshottableMetadata::readDiffFrom, - SnapshottableMetadata::fromXContent - ); - registerMetadataCustom( - NonSnapshottableMetadata.TYPE, - NonSnapshottableMetadata::readFrom, - NonSnapshottableMetadata::readDiffFrom, - NonSnapshottableMetadata::fromXContent - ); - registerMetadataCustom( - SnapshottableGatewayMetadata.TYPE, - SnapshottableGatewayMetadata::readFrom, - SnapshottableGatewayMetadata::readDiffFrom, - SnapshottableGatewayMetadata::fromXContent - ); - registerMetadataCustom( - NonSnapshottableGatewayMetadata.TYPE, - NonSnapshottableGatewayMetadata::readFrom, - NonSnapshottableGatewayMetadata::readDiffFrom, - NonSnapshottableGatewayMetadata::fromXContent - ); - registerMetadataCustom( - SnapshotableGatewayNoApiMetadata.TYPE, - SnapshotableGatewayNoApiMetadata::readFrom, - NonSnapshottableGatewayMetadata::readDiffFrom, - SnapshotableGatewayNoApiMetadata::fromXContent - ); - } - - @Override - public List getNamedWriteables() { - return namedWritables; - } - - @Override - public List getNamedXContent() { - return namedXContents; - } - } - - private static class SnapshottableMetadata extends TestCustomMetadata { - public static final String TYPE = "test_snapshottable"; - - SnapshottableMetadata(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - public static SnapshottableMetadata readFrom(StreamInput in) throws IOException { - return readFrom(SnapshottableMetadata::new, in); - } - - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(TYPE, in); - } - - public static SnapshottableMetadata fromXContent(XContentParser parser) throws IOException { - return fromXContent(SnapshottableMetadata::new, parser); - } - - @Override - public EnumSet context() { - return Metadata.API_AND_SNAPSHOT; - } - } - - private static class NonSnapshottableMetadata extends TestCustomMetadata { - public static final String TYPE = "test_non_snapshottable"; - - NonSnapshottableMetadata(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - public static NonSnapshottableMetadata readFrom(StreamInput in) throws IOException { - return readFrom(NonSnapshottableMetadata::new, in); - } - - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(TYPE, in); - } - - public static 
NonSnapshottableMetadata fromXContent(XContentParser parser) throws IOException { - return fromXContent(NonSnapshottableMetadata::new, parser); - } - - @Override - public EnumSet context() { - return Metadata.API_ONLY; - } - } - - private static class SnapshottableGatewayMetadata extends TestCustomMetadata { - public static final String TYPE = "test_snapshottable_gateway"; - - SnapshottableGatewayMetadata(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - public static SnapshottableGatewayMetadata readFrom(StreamInput in) throws IOException { - return readFrom(SnapshottableGatewayMetadata::new, in); - } - - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(TYPE, in); - } - - public static SnapshottableGatewayMetadata fromXContent(XContentParser parser) throws IOException { - return fromXContent(SnapshottableGatewayMetadata::new, parser); - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.API, Metadata.XContentContext.SNAPSHOT, Metadata.XContentContext.GATEWAY); - } - } - - private static class NonSnapshottableGatewayMetadata extends TestCustomMetadata { - public static final String TYPE = "test_non_snapshottable_gateway"; - - NonSnapshottableGatewayMetadata(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - public static NonSnapshottableGatewayMetadata readFrom(StreamInput in) throws IOException { - return readFrom(NonSnapshottableGatewayMetadata::new, in); - } - - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(TYPE, in); - } - - public static NonSnapshottableGatewayMetadata fromXContent(XContentParser parser) throws IOException { - return fromXContent(NonSnapshottableGatewayMetadata::new, parser); - } - - @Override - public EnumSet context() { - return Metadata.API_AND_GATEWAY; - } - - } - - private static class SnapshotableGatewayNoApiMetadata extends TestCustomMetadata { - public static final String TYPE = "test_snapshottable_gateway_no_api"; - - SnapshotableGatewayNoApiMetadata(String data) { - super(data); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - public static SnapshotableGatewayNoApiMetadata readFrom(StreamInput in) throws IOException { - return readFrom(SnapshotableGatewayNoApiMetadata::new, in); - } - - public static SnapshotableGatewayNoApiMetadata fromXContent(XContentParser parser) throws IOException { - return fromXContent(SnapshotableGatewayNoApiMetadata::new, parser); - } - - @Override - public EnumSet context() { - return EnumSet.of(Metadata.XContentContext.GATEWAY, Metadata.XContentContext.SNAPSHOT); - } - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 214d55770b044..03afc00224a4e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -28,7 +28,7 @@ import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.RepositoryMetadata; @@ -45,7 +45,6 @@ import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.node.Node; @@ -685,7 +684,6 @@ public void testRestoreShrinkIndex() throws Exception { public void testSnapshotWithDateMath() { final String repo = "repo"; - final IndexNameExpressionResolver nameExpressionResolver = TestIndexNameExpressionResolver.newInstance(); final String snapshotName = ""; logger.info("--> creating repository"); @@ -695,11 +693,11 @@ public void testSnapshotWithDateMath() { .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); - final String expression1 = nameExpressionResolver.resolveDateMathExpression(snapshotName); + final String expression1 = IndexNameExpressionResolver.resolveDateMathExpression(snapshotName); logger.info("--> creating date math snapshot"); createFullSnapshot(repo, snapshotName); // snapshot could be taken before or after a day rollover - final String expression2 = nameExpressionResolver.resolveDateMathExpression(snapshotName); + final String expression2 = IndexNameExpressionResolver.resolveDateMathExpression(snapshotName); SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo) .setSnapshots(Sets.newHashSet(expression1, expression2).toArray(Strings.EMPTY_ARRAY)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index 95da673c5c40c..6f325410b9c45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateRequest; import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index 2d0aac3b07f32..070eacfcc2897 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -12,14 +12,12 @@ import org.elasticsearch.core.internal.io.IOUtils; import 
org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; -import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.junit.After; import org.junit.Before; @@ -71,9 +69,8 @@ public Path nodeConfigPath(int nodeOrdinal) { ESIntegTestCase.TestSeedPlugin.class, MockHttpTransport.TestPlugin.class, MockTransportService.TestPlugin.class, - MockNioTransportPlugin.class, InternalSettingsPlugin.class, - MockRepository.Plugin.class + getTestTransportPlugin() ), Function.identity() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 489845ef7fd68..9dc84e99c314f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.metadata.RepositoryMetadata; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index cdcea000b46fd..c9c0c4a6cd60c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 5f68fecfda41e..b9df4a3e94e65 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.snapshots; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -29,7 +27,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; 
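Further down in this SharedClusterSnapshotRestoreIT diff, iteration over entry.shards() moves from com.carrotsearch.hppc cursors to the standard java.util.Map entry view. A minimal sketch of the migration; the concrete key and value types are assumed, since the flattened hunk elides the generics:

import java.util.Map;
import java.util.function.BiPredicate;

// Before (hppc): for (ObjectObjectCursor<K, V> c : map) { use(c.key, c.value); }
// After (JDK):   iterate entrySet() and use getKey()/getValue(); no hppc dependency.
static <K, V> boolean allShardsMatch(Map<K, V> shards, BiPredicate<K, V> condition) {
    for (Map.Entry<K, V> shard : shards.entrySet()) {
        if (condition.test(shard.getKey(), shard.getValue()) == false) {
            return false;
        }
    }
    return true;
}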
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -1196,9 +1194,9 @@ public void testSnapshotStatus() throws Exception { return false; } SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshots.iterator().next()); - for (ObjectObjectCursor shard : entry.shards()) { - if (shard.value.nodeId().equals(blockedNodeId) == false - && shard.value.state() == SnapshotsInProgress.ShardState.SUCCESS == false) { + for (Map.Entry shard : entry.shards().entrySet()) { + if (shard.getValue().nodeId().equals(blockedNodeId) == false + && shard.getValue().state() == SnapshotsInProgress.ShardState.SUCCESS == false) { return false; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index ba135a1ca4870..bcba4d0dc5076 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.snapshots; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index 9f515fd2db767..1ad079d14dd97 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -155,10 +155,10 @@ public void testIncludeGlobalState() throws Exception { assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get()); } - logger.info("--> try restoring cluster state from snapshot without global state"); + logger.info("--> try restoring from snapshot without global state"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state") .setWaitForCompletion(true) - .setRestoreGlobalState(true) + .setRestoreGlobalState(false) .execute() .actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -227,7 +227,7 @@ public void testIncludeGlobalState() throws Exception { logger.info("--> try restoring index and cluster state from snapshot without global state"); restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") .setWaitForCompletion(true) - .setRestoreGlobalState(true) + .setRestoreGlobalState(false) .execute() .actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 3cb6aa40060c2..8b0718a69ef8d 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 0b258df26bfa8..428a5f7e726a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -210,7 +210,7 @@ private static class TrackedCluster { private final ThreadPool threadPool = new TestThreadPool( "TrackedCluster", // a single thread for "client" activities, to limit the number of activities all starting at once - new ScalingExecutorBuilder(CLIENT, 1, 1, TimeValue.ZERO, CLIENT) + new ScalingExecutorBuilder(CLIENT, 1, 1, TimeValue.ZERO, true, CLIENT) ); private final AtomicBoolean shouldStop = new AtomicBoolean(); @@ -312,7 +312,7 @@ public void run() throws InterruptedException { } assertTrue(shouldStop.compareAndSet(false, true)); - final long permitDeadlineMillis = threadPool.relativeTimeInMillis() + TimeUnit.SECONDS.toMillis(30); + final long permitDeadlineMillis = threadPool.relativeTimeInMillis() + TimeUnit.MINUTES.toMillis(2); final List failedPermitAcquisitions = new ArrayList<>(); acquirePermitsAtEnd( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index f767bc0d8b4de..6208314be428b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -8,14 +8,11 @@ package org.elasticsearch.snapshots; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.AssociatedIndexDescriptor; import 
org.elasticsearch.indices.SystemIndexDescriptor; @@ -23,7 +20,6 @@ import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.MockLogAppender; import org.junit.Before; import java.util.ArrayList; @@ -34,7 +30,6 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.elasticsearch.snapshots.SnapshotsService.NO_FEATURE_STATES_VALUE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -123,8 +118,7 @@ public void testSnapshotWithoutGlobalState() { .collect(Collectors.toSet()); assertThat("not-a-system-index", in(snapshottedIndices)); - // TODO: without global state the system index shouldn't be snapshotted (8.0 & later only) - // assertThat(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, not(in(snapshottedIndices))); + assertThat(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, not(in(snapshottedIndices))); } /** @@ -138,7 +132,7 @@ public void testSnapshotByFeature() { // snapshot by feature CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setIncludeGlobalState(false) + .setIncludeGlobalState(true) .setWaitForCompletion(true) .setFeatureStates(SystemIndexTestPlugin.class.getSimpleName(), AnotherSystemIndexTestPlugin.class.getSimpleName()) .get(); @@ -166,10 +160,10 @@ public void testSnapshotByFeature() { /** * Take a snapshot with global state but don't restore system indexes. By - * default, snapshot restorations ignore global state. This means that, - * for now, the system index is treated as part of the snapshot and must be - * handled explicitly. Otherwise, as in this test, there will be an - * exception. + * default, snapshot restorations ignore global state and don't include system indices. + * + * This means that we should be able to take a snapshot with a system index in it and restore it without specifying indices, even if + * the cluster already has a system index with the same name (because the system index from the snapshot won't be restored). 
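 * A hedged sketch of the flow described above; the repository, snapshot, and index
 * names are illustrative, not from the test:
 * <pre>
 * RestoreSnapshotResponse restore = clusterAdmin().prepareRestoreSnapshot("repo", "snap")
 *     .setWaitForCompletion(true)
 *     .get();
 * // regular indices come back; the snapshotted system index is skipped
 * assertThat(restore.getRestoreInfo().indices(), hasItem("my-regular-index"));
 * assertThat(restore.getRestoreInfo().indices(), not(hasItem(".my-system-index")));
 * </pre>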
*/ public void testDefaultRestoreOnlyRegularIndices() { createRepository(REPO_NAME, "fs"); @@ -181,7 +175,6 @@ public void testDefaultRestoreOnlyRegularIndices() { // snapshot including global state CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -190,22 +183,18 @@ public void testDefaultRestoreOnlyRegularIndices() { // Delete the regular index so we can restore it assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); - // restore indices by feature, with only the regular index named explicitly - SnapshotRestoreException exception = expectThrows( - SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap").setWaitForCompletion(true).get() - ); - + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + .setWaitForCompletion(true) + .get(); + assertThat(restoreResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( - exception.getMessage(), - containsString( - "cannot restore index [" + SystemIndexTestPlugin.SYSTEM_INDEX_NAME + "] because an open index with same name already exists" - ) + restoreResponse.getRestoreInfo().indices(), + allOf(hasItem(regularIndex), not(hasItem(SystemIndexTestPlugin.SYSTEM_INDEX_NAME))) ); } /** - * Take a snapshot with global state but restore features by state. + * Take a snapshot with global state but restore features by feature state. */ public void testRestoreByFeature() { createRepository(REPO_NAME, "fs"); @@ -234,10 +223,9 @@ public void testRestoreByFeature() { // Delete the regular index so we can restore it assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); - // restore indices by feature, with only the regular index named explicitly + // restore indices by feature RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) - .setIndices(regularIndex) .setFeatureStates("SystemIndexTestPlugin") .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -265,7 +253,6 @@ public void testSnapshotAndRestoreAssociatedIndices() { // snapshot CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) .setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName()) .setWaitForCompletion(true) .get(); @@ -336,53 +323,70 @@ public void testRestoreFeatureNotInSnapshot() { ); } - /** - * Check that directly requesting a system index in a restore request logs a deprecation warning. 
- * @throws IllegalAccessException if something goes wrong with the mock log appender - */ - public void testRestoringSystemIndexByNameIsDeprecated() throws IllegalAccessException { + public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { createRepository(REPO_NAME, "fs"); // put a document in system index indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - // snapshot including global state + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) + .setWaitForCompletion(true) + .setIncludeGlobalState(randomBoolean()) + .get() + ); + assertThat( + error.getMessage(), + equalTo( + "the [indices] parameter includes system indices [.test-system-idx]; to include or exclude system indices from a snapshot, " + + "use the [include_global_state] or [feature_states] parameters" + ) + ); + + // And create a successful snapshot so we don't upset the test framework CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); assertSnapshotSuccess(createSnapshotResponse); + } - // Delete the index so we can restore it without requesting the feature state - assertAcked(client().admin().indices().prepareDelete(SystemIndexTestPlugin.SYSTEM_INDEX_NAME).get()); - - // Set up a mock log appender to watch for the log message we expect - MockLogAppender mockLogAppender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger("org.elasticsearch.deprecation.snapshots.RestoreService"), mockLogAppender); - mockLogAppender.start(); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "restore-system-index-from-snapshot", - "org.elasticsearch.deprecation.snapshots.RestoreService", - Level.WARN, - "Restoring system indices by name is deprecated. Use feature states instead. System indices: [.test-system-idx]" - ) - ); + /** + * Check that directly requesting a system index in a restore request throws an Exception. 
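 * A hedged sketch of the expected failure; the system index name is hypothetical:
 * <pre>
 * IllegalArgumentException e = expectThrows(
 *     IllegalArgumentException.class,
 *     () -> clusterAdmin().prepareRestoreSnapshot("repo", "snap")
 *         .setIndices(".my-system-index")
 *         .setWaitForCompletion(true)
 *         .get()
 * );
 * assertThat(e.getMessage(), containsString("restored as part of a feature state"));
 * </pre>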
+ */ + public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessException { + createRepository(REPO_NAME, "fs"); + // put a document in system index + indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); + refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - // restore system index by name, rather than feature state - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + // snapshot including global state + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) - .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) + .setIncludeGlobalState(true) .get(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + assertSnapshotSuccess(createSnapshotResponse); - // Check that the message was logged and remove log appender - mockLogAppender.assertAllExpectationsMatched(); - mockLogAppender.stop(); - Loggers.removeAppender(LogManager.getLogger("org.elasticsearch.deprecation.snapshots.RestoreService"), mockLogAppender); + // Now that we've taken the snapshot, add another doc + indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc"); + refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - // verify only the original document is restored - assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L)); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + .setWaitForCompletion(true) + .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) + .get() + ); + assertThat( + ex.getMessage(), + equalTo("requested system indices [.test-system-idx], but system indices can only be restored as part of a feature state") + ); + + // Make sure the original index exists unchanged + assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); } /** @@ -457,11 +461,8 @@ public void testRestoreSystemIndicesAsGlobalStateWithDefaultFeatureStateList() { /** * If the list of feature states to restore contains only "none" and we are restoring global state, * no feature states should be restored. - * - * In this test, we explicitly request a regular index to avoid any confusion over the meaning of - * "all indices." */ - public void testRestoreSystemIndicesAsGlobalStateWithEmptyListOfFeatureStates() { + public void testRestoreSystemIndicesAsGlobalStateWithNoFeatureStates() { createRepository(REPO_NAME, "fs"); String regularIndex = "my-index"; indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); @@ -482,9 +483,8 @@ public void testRestoreSystemIndicesAsGlobalStateWithEmptyListOfFeatureStates() assertAcked(client().admin().indices().prepareDelete(regularIndex).get()); assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); - // restore regular index, with global state and an empty list of feature states + // restore with global state and all indices but explicitly no feature states. 
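// ("none" is the reserved feature-states value meaning "restore no feature states";
// randomFrom("none", "NONE") in the request below also exercises its case-insensitive handling.)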
RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) .setWaitForCompletion(true) .setRestoreGlobalState(true) .setFeatureStates(new String[] { randomFrom("none", "NONE") }) @@ -493,44 +493,8 @@ public void testRestoreSystemIndicesAsGlobalStateWithEmptyListOfFeatureStates() // verify that the system index still has the updated document, i.e. has not been restored assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); - } - - /** - * If the list of feature states to restore contains only "none" and we are restoring global state, - * no feature states should be restored. However, for backwards compatibility, if no index is - * specified, system indices are included in "all indices." In this edge case, we get an error - * saying that the system index must be closed, because here it is included in "all indices." - */ - public void testRestoreSystemIndicesAsGlobalStateWithEmptyListOfFeatureStatesNoIndicesSpecified() { - createRepository(REPO_NAME, "fs"); - indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); - refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - - // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setIncludeGlobalState(true) - .get(); - assertSnapshotSuccess(createSnapshotResponse); - - // restore indices as global state without closing the index - SnapshotRestoreException exception = expectThrows( - SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .setFeatureStates(new String[] { randomFrom("none", "NONE") }) - .get() - ); - - assertThat( - exception.getMessage(), - containsString( - "cannot restore index [" - + SystemIndexTestPlugin.SYSTEM_INDEX_NAME - + "] because an open index with same name already exists in the cluster." 
- ) - ); + // And the regular index has been restored + assertThat(getDocCount(regularIndex), equalTo(1L)); } /** @@ -574,7 +538,6 @@ public void testAllSystemIndicesAreRemovedWhenThatFeatureStateIsRestored() { // restore the snapshot RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) .setFeatureStates("SystemIndexTestPlugin") .setWaitForCompletion(true) .setRestoreGlobalState(true) @@ -624,7 +587,6 @@ public void testSystemIndexAliasesAreAlwaysRestored() { // Now restore the snapshot with no aliases RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) .setFeatureStates("SystemIndexTestPlugin") .setWaitForCompletion(true) .setRestoreGlobalState(false) @@ -709,7 +671,6 @@ public void testNoneFeatureStateOnCreation() { refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME); CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) .setWaitForCompletion(true) .setIncludeGlobalState(true) .setFeatureStates(randomFrom("none", "NONE")) @@ -728,75 +689,6 @@ public void testNoneFeatureStateOnCreation() { assertThat(snapshottedIndices, allOf(hasItem(regularIndex), not(hasItem(SystemIndexTestPlugin.SYSTEM_INDEX_NAME)))); } - public void testNoneFeatureStateOnRestore() { - createRepository(REPO_NAME, "fs"); - final String regularIndex = "test-idx"; - - indexDoc(regularIndex, "1", "purpose", "create an index that can be restored"); - indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); - refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - - // Create a snapshot - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) - .setWaitForCompletion(true) - .setIncludeGlobalState(true) - .get(); - assertSnapshotSuccess(createSnapshotResponse); - - // Index another doc into the system index - indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc"); - refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); - // And delete the regular index so we can restore it - assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); - - // Restore the snapshot specifying the regular index and "none" for feature states - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setIndices(regularIndex) - .setWaitForCompletion(true) - .setRestoreGlobalState(randomBoolean()) - .setFeatureStates(randomFrom("none", "NONE")) - .get(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - - // The regular index should only have one doc - assertThat(getDocCount(regularIndex), equalTo(1L)); - // But the system index shouldn't have been touched - assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); - } - - /** - * This test checks a piece of BWC logic, and so should be removed when we block restoring system indices by name. 
- * - * This test checks whether it's possible to change the name of a system index when it's restored by name (rather than by feature state) - */ - public void testCanRenameSystemIndicesIfRestoredByIndexName() { - createRepository(REPO_NAME, "fs"); - indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); - refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - - // snapshot including our system index - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setIncludeGlobalState(false) - .get(); - assertSnapshotSuccess(createSnapshotResponse); - - // Now restore it with a rename - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) - .setWaitForCompletion(true) - .setRestoreGlobalState(false) - .setFeatureStates(NO_FEATURE_STATES_VALUE) - .setRenamePattern(".test-(.+)") - .setRenameReplacement("restored-$1") - .get(); - - assertTrue("The renamed system index should be present", indexExists("restored-system-idx")); - assertTrue("The original index should still be present", indexExists(SystemIndexTestPlugin.SYSTEM_INDEX_NAME)); - } - /** * Ensures that if we can only capture a partial snapshot of a system index, then the feature state associated with that index is * not included in the snapshot, because it would not be safe to restore that feature state. @@ -890,7 +782,6 @@ public void testParallelIndexDeleteRemovesFeatureState() throws Exception { logger.info("--> Blocked repo, starting snapshot..."); final String partialSnapName = "test-partial-snap"; ActionFuture createSnapshotFuture = clusterAdmin().prepareCreateSnapshot(REPO_NAME, partialSnapName) - .setIndices(nonsystemIndex) .setIncludeGlobalState(true) .setWaitForCompletion(true) .setPartial(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java index 3c0ce10d973de..9fa91ac09ef4d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java @@ -8,14 +8,13 @@ package org.elasticsearch.timeseries.support; -import io.github.nik9000.mapmatcher.MapMatcher; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -23,6 +22,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; @@ -39,9 +39,9 @@ import java.util.function.BiConsumer; import java.util.function.IntFunction; -import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; -import static 
io.github.nik9000.mapmatcher.MapMatcher.matchesMap; import static java.time.temporal.ChronoField.INSTANT_SECONDS; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; @TestLogging(value = "org.elasticsearch.timeseries.support:debug", reason = "test") public class TimeSeriesMetricsIT extends ESIntegTestCase { @@ -88,15 +88,11 @@ public void assertSmallSimple(Object d1, Object d2, CheckedConsumer { int dimCount = (i & 0x07) + 1; - Map dims = new HashMap<>(dimCount); + Map dims = Maps.newMapWithExpectedSize(dimCount); int offset = (i >> 3) & 0x03; String value = Integer.toString(i, Character.MAX_RADIX); for (int d = 0; d < dimCount; d++) { @@ -314,7 +310,7 @@ public void testManySteps() throws InterruptedException, ExecutionException, IOE String min = "2021-01-01T00:00:00Z"; long minMillis = FORMATTER.parseMillis(min); int iterationBuckets = scaledRandomIntBetween(50, 100); - int bucketCount = scaledRandomIntBetween(iterationBuckets * 2, iterationBuckets * 100); + int bucketCount = scaledRandomIntBetween(iterationBuckets * 2, iterationBuckets * 10); long maxMillis = minMillis + bucketCount * TimeUnit.SECONDS.toMillis(5); List docs = new ArrayList<>(bucketCount); for (long millis = minMillis; millis < maxMillis; millis += TimeUnit.SECONDS.toMillis(5)) { @@ -429,7 +425,7 @@ private Map>, List>> ) { return withMetrics( bucketBatchSize, - between(0, 10000), // Not used by this method + between(1, 10000), staleness, (future, metrics) -> metrics.range(List.of(eq("v")), List.of(), timeMillis, range, step, new CollectingListener(future)) ); @@ -481,7 +477,7 @@ private R withMetrics( new TimeSeriesMetricsService(client(), bucketBatchSize, docBatchSize, staleness).newMetrics( new String[] { "tsdb" }, IndicesOptions.STRICT_EXPAND_OPEN, - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(TimeSeriesMetrics metrics) { handle.accept(result, metrics); @@ -512,7 +508,7 @@ public void onTimeSeriesStart(String metric, Map dimensions) { results.put(currentDimensions, currentValues); } currentDimensions = new Tuple<>(metric, dimensions); - currentValues = new ArrayList<>(); + currentValues = results.getOrDefault(currentDimensions, new ArrayList<>()); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java index 9a1325a3030a4..98fe88c3b5327 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java @@ -21,7 +21,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.internal.transport.NoNodeAvailableException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.MergePolicyConfig; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 3a5ddf5ae87bc..07de32fef3b13 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java 
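The TimeSeriesMetricsIT hunks above swap new HashMap<>(dimCount) for Maps.newMapWithExpectedSize(dimCount). The distinction: HashMap's int constructor takes a bucket capacity, not an entry count, so with the default 0.75 load factor a map built as new HashMap<>(n) rehashes before it ever holds n entries. A minimal sketch of the sizing idea, using an illustrative helper rather than the exact org.elasticsearch.common.util.Maps implementation:

    import java.util.HashMap;
    import java.util.Map;

    final class SizedMaps {
        // Pick a capacity large enough that expectedSize entries fit without a resize.
        static <K, V> Map<K, V> newMapWithExpectedSize(int expectedSize) {
            int capacity = (int) Math.ceil(expectedSize / 0.75d);
            return new HashMap<>(capacity);
        }
    }

For dimCount = 8, for example, this allocates a capacity of 11 instead of 8, so all eight entries insert without rehashing.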
@@ -9,7 +9,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -148,7 +148,7 @@ public void testSimpleValidateQuery() throws Exception { } public void testExplainValidateQueryTwoNodes() throws IOException { - createIndex("test"); + createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).build()); ensureGreen(); client().admin() .indices() @@ -182,6 +182,9 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .execute() .actionGet(); + for (int i = 0; i < 10; i++) { + client().prepareIndex("test").setSource("foo", "text", "bar", i, "baz", "blort").execute().actionGet(); + } refresh(); for (Client client : internalCluster().getClients()) { @@ -368,17 +371,9 @@ public void testIrrelevantPropertiesBeforeQuery() { ensureGreen(); refresh(); - assertThat( - client().admin() - .indices() - .prepareValidateQuery("test") - .setQuery( - QueryBuilders.wrapperQuery(new BytesArray("{\"foo\": \"bar\", \"query\": {\"term\" : { \"user\" : \"kimchy\" }}}")) - ) - .get() - .isValid(), - equalTo(false) - ); + assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.wrapperQuery(new BytesArray(""" + {"foo": "bar", "query": {"term" : { "user" : "kimchy" }}} + """))).get().isValid(), equalTo(false)); } public void testIrrelevantPropertiesAfterQuery() { @@ -386,17 +381,9 @@ public void testIrrelevantPropertiesAfterQuery() { ensureGreen(); refresh(); - assertThat( - client().admin() - .indices() - .prepareValidateQuery("test") - .setQuery( - QueryBuilders.wrapperQuery(new BytesArray("{\"query\": {\"term\" : { \"user\" : \"kimchy\" }}, \"foo\": \"bar\"}")) - ) - .get() - .isValid(), - equalTo(false) - ); + assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.wrapperQuery(new BytesArray(""" + {"query": {"term" : { "user" : "kimchy" }}, "foo": "bar"} + """))).get().isValid(), equalTo(false)); } private static void assertExplanation(QueryBuilder queryBuilder, Matcher matcher, boolean withRewrite) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index feaf1d0523a41..b71500b73edd7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -380,8 +380,10 @@ public Version get() { } public void consume(Version version) { - if (version == null) return; - this.current.updateAndGet(current -> version.compareTo(current) <= 0 ? current : version); + if (version == null) { + return; + } + this.current.updateAndGet(currentVersion -> version.compareTo(currentVersion) <= 0 ? 
currentVersion : version); } } @@ -479,9 +481,9 @@ public Object initialState() { @Override public Optional nextState(Object currentState, Object input, Object output) { State state = (State) currentState; - if (output instanceof IndexResponseHistoryOutput) { + if (output instanceof IndexResponseHistoryOutput indexResponseHistoryOutput) { if (input.equals(state.safeVersion) || (state.lastFailed && ((Version) input).compareTo(state.safeVersion) > 0)) { - return Optional.of(casSuccess(((IndexResponseHistoryOutput) output).getVersion())); + return Optional.of(casSuccess(indexResponseHistoryOutput.getVersion())); } else { return Optional.empty(); } @@ -491,36 +493,11 @@ public Optional nextState(Object currentState, Object input, Object outp } } - private static final class State { - private final Version safeVersion; - private final boolean lastFailed; - - private State(Version safeVersion, boolean lastFailed) { - this.safeVersion = safeVersion; - this.lastFailed = lastFailed; - } + private record State(Version safeVersion, boolean lastFailed) { public State failed() { return lastFailed ? this : casFail(safeVersion); } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - State that = (State) o; - return lastFailed == that.lastFailed && safeVersion.equals(that.safeVersion); - } - - @Override - public int hashCode() { - return Objects.hash(safeVersion, lastFailed); - } - - @Override - public String toString() { - return "State{" + "safeVersion=" + safeVersion + ", lastFailed=" + lastFailed + '}'; - } } private static State casFail(Version stateVersion) { @@ -686,9 +663,9 @@ private static LinearizabilityChecker.History readHistory(StreamInput input) thr } private static void writeEvent(LinearizabilityChecker.Event event, BytesStreamOutput output) throws IOException { - output.writeEnum(event.type); - output.writeNamedWriteable((NamedWriteable) event.value); - output.writeInt(event.id); + output.writeEnum(event.type()); + output.writeNamedWriteable((NamedWriteable) event.value()); + output.writeInt(event.id()); } private static LinearizabilityChecker.Event readEvent(StreamInput input) throws IOException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 1d5ca406f39a6..bc7b396f0b06a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -435,33 +435,32 @@ private interface IDSource { } private IDSource getRandomIDs() { - IDSource ids; final Random random = random(); - switch (random.nextInt(6)) { - case 0: + return switch (random.nextInt(6)) { + case 0 -> { // random simple logger.info("--> use random simple ids"); - ids = new IDSource() { + yield new IDSource() { @Override public String next() { return TestUtil.randomSimpleString(random, 1, 10); } }; - break; - case 1: + } + case 1 -> { // random realistic unicode logger.info("--> use random realistic unicode ids"); - ids = new IDSource() { + yield new IDSource() { @Override public String next() { return TestUtil.randomRealisticUnicodeString(random, 1, 20); } }; - break; - case 2: + } + case 2 -> { // sequential logger.info("--> use sequential ids"); - ids = new IDSource() { + yield new IDSource() { int upto; @Override @@ -469,11 +468,11 @@ public String next() { 
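getRandomIDs is being rewritten from a break-based switch statement into a switch expression: simple arms map directly with ->, block arms yield their value, and the mutable ids local and the break statements disappear. A minimal, self-contained sketch of the same pattern, with an illustrative IdSource as a stand-in for the test's IDSource:

    import java.util.Random;

    final class SwitchExpressionSketch {
        interface IdSource {
            String next();
        }

        static IdSource pick(Random random) {
            return switch (random.nextInt(3)) {
                case 0 -> () -> Long.toString(random.nextLong() & 0x3ffffffffffffffL, 36); // random long ids
                case 1 -> {
                    // A block arm must yield its value, mirroring the hunks above.
                    yield new IdSource() {
                        int upto;

                        @Override
                        public String next() {
                            return Integer.toString(upto++); // sequential ids
                        }
                    };
                }
                case 2 -> () -> Integer.toHexString(random.nextInt()); // random hex ids
                default -> throw new AssertionError();
            };
        }
    }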
return Integer.toString(upto++); } }; - break; - case 3: + } + case 3 -> { // zero-pad sequential logger.info("--> use zero-padded sequential ids"); - ids = new IDSource() { + yield new IDSource() { final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random, 4, 20) + "d", 0); int upto; @@ -483,11 +482,11 @@ public String next() { return zeroPad.substring(zeroPad.length() - s.length()) + s; } }; - break; - case 4: + } + case 4 -> { // random long logger.info("--> use random long ids"); - ids = new IDSource() { + yield new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); @Override @@ -495,11 +494,11 @@ public String next() { return Long.toString(random.nextLong() & 0x3ffffffffffffffL, radix); } }; - break; - case 5: + } + case 5 -> { // zero-pad random long logger.info("--> use zero-padded random long ids"); - ids = new IDSource() { + yield new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); @Override @@ -507,12 +506,9 @@ public String next() { return Long.toString(random.nextLong() & 0x3ffffffffffffffL, radix); } }; - break; - default: - throw new AssertionError(); - } - - return ids; + } + default -> throw new AssertionError(); + }; } private static class IDAndVersion { @@ -547,8 +543,7 @@ public String toString() { sb.append(alreadyExists); if (response != null) { - if (response instanceof DeleteResponse) { - DeleteResponse deleteResponse = (DeleteResponse) response; + if (response instanceof DeleteResponse deleteResponse) { sb.append(" response:"); sb.append(" index="); sb.append(deleteResponse.getIndex()); @@ -558,8 +553,7 @@ public String toString() { sb.append(deleteResponse.getVersion()); sb.append(" found="); sb.append(deleteResponse.getResult() == DocWriteResponse.Result.DELETED); - } else if (response instanceof IndexResponse) { - IndexResponse indexResponse = (IndexResponse) response; + } else if (response instanceof IndexResponse indexResponse) { sb.append(" index="); sb.append(indexResponse.getIndex()); sb.append(" id="); diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index e343a4f26dac6..04b49e682a91a 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -16,14 +16,14 @@ import java.io.IOException; import java.net.URL; import java.security.CodeSource; -import java.util.Objects; import java.util.jar.JarInputStream; import java.util.jar.Manifest; /** * Information about a build of Elasticsearch. */ -public class Build { +public record Build(Flavor flavor, Type type, String hash, String date, boolean isSnapshot, String version) { + /** * The current build of Elasticsearch. Filled with information scanned at * startup from the jar. @@ -176,8 +176,6 @@ public static Type fromDisplayName(final String displayName, final boolean stric CURRENT = new Build(flavor, type, hash, date, isSnapshot, version); } - private final boolean isSnapshot; - /** * The location of the code source for Elasticsearch * @@ -188,29 +186,6 @@ static URL getElasticsearchCodeSourceLocation() { return codeSource == null ? 
null : codeSource.getLocation(); } - private final Flavor flavor; - private final Type type; - private final String hash; - private final String date; - private final String version; - - public Build(final Flavor flavor, final Type type, final String hash, final String date, boolean isSnapshot, String version) { - this.flavor = flavor; - this.type = type; - this.hash = hash; - this.date = date; - this.isSnapshot = isSnapshot; - this.version = version; - } - - public String hash() { - return hash; - } - - public String date() { - return date; - } - public static Build readBuild(StreamInput in) throws IOException { final Flavor flavor; final Type type; @@ -233,34 +208,22 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - out.writeString(build.getQualifiedVersion()); + out.writeString(build.qualifiedVersion()); } /** * Get the version as considered at build time - * + *
<p>
    * Offers a way to get the fully qualified version as configured by the build. * This will be the same as {@link Version} for production releases, but may include on of the qualifier ( e.x alpha1 ) * or -SNAPSHOT for others. * * @return the fully qualified build */ - public String getQualifiedVersion() { + public String qualifiedVersion() { return version; } - public Flavor flavor() { - return flavor; - } - - public Type type() { - return type; - } - - public boolean isSnapshot() { - return isSnapshot; - } - /** * Provides information about the intent of the build * @@ -274,41 +237,4 @@ public boolean isProductionRelease() { public String toString() { return "[" + flavor.displayName() + "][" + type.displayName + "][" + hash + "][" + date + "][" + version + "]"; } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Build build = (Build) o; - - if (flavor.equals(build.flavor) == false) { - return false; - } - - if (type.equals(build.type) == false) { - return false; - } - - if (isSnapshot != build.isSnapshot) { - return false; - } - if (hash.equals(build.hash) == false) { - return false; - } - if (version.equals(build.version) == false) { - return false; - } - return date.equals(build.date); - } - - @Override - public int hashCode() { - return Objects.hash(flavor, type, isSnapshot, hash, date, version); - } - } diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 1c2e3bc0764f2..09f2182a32307 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -58,7 +58,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte * to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is * internal only and not available as a URL parameter. */ - private static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; + public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; /** * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)} * to control if the {@code stack_trace} element should render. 
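Both State (in ConcurrentSeqNoVersioningIT) and Build are converted to records above, which is what lets the diff drop their constructors, accessors, equals, hashCode and toString wholesale: a record derives all of them from its header, and record accessors carry no get- prefix, hence the getQualifiedVersion() to qualifiedVersion() rename. A minimal sketch with illustrative component names, not the full Build signature:

    record BuildInfo(String hash, String date, boolean isSnapshot) {
        // equals, hashCode, toString and the accessors hash(), date() and
        // isSnapshot() are all compiler-generated from the header above.
    }

    // Usage: new BuildInfo("abc123", "2021-12-01", true).hash() returns "abc123".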
Unlike most parameters to {@code toXContent} methods this parameter is @@ -332,8 +332,7 @@ protected static void innerToXContent( headerToXContent(builder, entry.getKey().substring("es.".length()), entry.getValue()); } - if (throwable instanceof ElasticsearchException) { - ElasticsearchException exception = (ElasticsearchException) throwable; + if (throwable instanceof ElasticsearchException exception) { exception.metadataToXContent(builder, params); } @@ -1196,8 +1195,8 @@ private enum ElasticsearchExceptionHandle { ), // 93 used to be for IndexWarmerMissingException NO_NODE_AVAILABLE_EXCEPTION( - org.elasticsearch.client.transport.NoNodeAvailableException.class, - org.elasticsearch.client.transport.NoNodeAvailableException::new, + org.elasticsearch.client.internal.transport.NoNodeAvailableException.class, + org.elasticsearch.client.internal.transport.NoNodeAvailableException::new, 94, UNKNOWN_VERSION_ADDED ), diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 320b406c0a9ce..50d7bed85b9a2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -44,6 +44,8 @@ public class Version implements Comparable, ToXContentFragment { * The unreleased last minor is the current major with a upped minor: 7._4_.0 * The unreleased revision is the very release with a upped revision 7.3._1_ */ + private static final org.apache.lucene.util.Version LUCENE_8_11_1 = org.apache.lucene.util.Version.fromBits(8, 11, 1); + public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); public static final Version V_7_0_0 = new Version(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); @@ -91,8 +93,12 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_15_1 = new Version(7150199, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_15_2 = new Version(7150299, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_3 = new Version(7150399, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_1 = new Version(7160199, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_2 = new Version(7160299, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_3 = new Version(7160399, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_4 = new Version(7160499, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_17_0 = new Version(7170099, LUCENE_8_11_1); public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version V_8_1_0 = new Version(8010099, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version CURRENT = V_8_1_0; @@ -401,7 +407,7 @@ public static void main(String[] args) { final String versionOutput = String.format( Locale.ROOT, "Version: %s, Build: %s/%s/%s/%s, JVM: %s", - Build.CURRENT.getQualifiedVersion(), + Build.CURRENT.qualifiedVersion(), Build.CURRENT.flavor().displayName(), Build.CURRENT.type().displayName(), Build.CURRENT.hash(), diff --git 
a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index e3a4efe94361e..7763854f9e774 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -239,7 +239,7 @@ import org.elasticsearch.action.termvectors.TransportTermVectorsAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.NamedRegistry; @@ -459,7 +459,11 @@ public ActionModule( destructiveOperations = new DestructiveOperations(settings, clusterSettings); Set headers = Stream.concat( actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()), - Stream.of(new RestHeaderDefinition(Task.X_OPAQUE_ID, false), new RestHeaderDefinition(Task.TRACE_PARENT, false)) + Stream.of( + new RestHeaderDefinition(Task.X_OPAQUE_ID_HTTP_HEADER, false), + new RestHeaderDefinition(Task.TRACE_PARENT_HTTP_HEADER, false), + new RestHeaderDefinition(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, false) + ) ).collect(Collectors.toSet()); UnaryOperator restWrapper = null; for (ActionPlugin plugin : actionPlugins) { diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index dd7af9c02efad..6209e9fce390e 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.action; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import java.util.Objects; diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 3d05f5e2558e9..739d94fbb545a 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -12,11 +12,14 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.cluster.metadata.IndexAbstraction; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; @@ -151,6 +154,18 @@ public interface DocWriteRequest extends IndicesRequest, Accountable { */ int route(IndexRouting indexRouting); + /** + * Resolves the write index that should receive this request + * based on the provided index abstraction. + * + * @param ia The provided index abstraction + * @param metadata The metadata instance used to resolve the write index. 
+ * @return the write index that should receive this request + */ + default Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { + return ia.getWriteIndex(); + } + /** * Requested operation type to perform on the document */ @@ -187,18 +202,13 @@ public String getLowercase() { } public static OpType fromId(byte id) { - switch (id) { - case 0: - return INDEX; - case 1: - return CREATE; - case 2: - return UPDATE; - case 3: - return DELETE; - default: - throw new IllegalArgumentException("Unknown opType: [" + id + "]"); - } + return switch (id) { + case 0 -> INDEX; + case 1 -> CREATE; + case 2 -> UPDATE; + case 3 -> DELETE; + default -> throw new IllegalArgumentException("Unknown opType: [" + id + "]"); + }; } public static OpType fromString(String sOpType) { diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 87ce403e30b92..b74c3930d0745 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -81,20 +81,14 @@ public String getLowercase() { public static Result readFrom(StreamInput in) throws IOException { Byte opcode = in.readByte(); - switch (opcode) { - case 0: - return CREATED; - case 1: - return UPDATED; - case 2: - return DELETED; - case 3: - return NOT_FOUND; - case 4: - return NOOP; - default: - throw new IllegalArgumentException("Unknown result code: " + opcode); - } + return switch (opcode) { + case 0 -> CREATED; + case 1 -> UPDATED; + case 2 -> DELETED; + case 3 -> NOT_FOUND; + case 4 -> NOOP; + default -> throw new IllegalArgumentException("Unknown result code: " + opcode); + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index 99dac840e2280..6ceea15d8fd11 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.allocation; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for requests to explain the allocation of a shard in the cluster diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index 86f67f34f1804..00b3f697a60e3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateObserver.Listener; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; import 
org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -117,12 +118,12 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { listener.onFailure(e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { final ClusterStateObserver observer = new ClusterStateObserver( clusterService, @@ -173,7 +174,7 @@ public void onTimeout(TimeValue timeout) { observer.waitForNextChange(clusterStateListener, allNodesRemoved); } } - }); + }, ClusterStateTaskExecutor.unbatched()); } private static Set resolveVotingConfigExclusionsAndCheckMaximum( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 6fb054121f5c8..073100daeaeb9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateObserver.Listener; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -144,15 +145,16 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { listener.onFailure(e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { listener.onResponse(ActionResponse.Empty.INSTANCE); } - } + }, + ClusterStateTaskExecutor.unbatched() ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index ec872f8dd0bf0..0d39ecb8f227a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; import org.elasticsearch.core.TimeValue; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index d5416fd8aa29f..7312f62f447ec 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; @@ -26,7 +27,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -78,7 +78,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo if (indexList == null || indexList.isEmpty()) { indices = emptyMap(); } else { - indices = new HashMap<>(indexList.size()); + indices = Maps.newMapWithExpectedSize(indexList.size()); for (ClusterIndexHealth indexHealth : indexList) { indices.put(indexHealth.getIndex(), indexHealth); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index e5aa74987655b..5e2034b6905c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -18,8 +18,9 @@ import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.LocalClusterUpdateTask; +import org.elasticsearch.cluster.LocalMasterServiceTask; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -107,87 +108,73 @@ private void waitForEventsAndExecuteHealth( final long endTimeRelativeMillis ) { assert request.waitForEvents() != null; + final String source = "cluster_health (wait_for_events [" + request.waitForEvents() + "])"; if (request.local()) { - clusterService.submitStateUpdateTask( - "cluster_health (wait_for_events [" + request.waitForEvents() + "])", - new LocalClusterUpdateTask(request.waitForEvents()) { - @Override - public ClusterTasksResult execute(ClusterState currentState) { - return unchanged(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth( - request, - clusterService.state(), - listener, - waitCount, - observedState -> waitForEventsAndExecuteHealth(request, listener, waitCount, endTimeRelativeMillis) - ); - } + new LocalMasterServiceTask(request.waitForEvents()) { + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); + final TimeValue newTimeout = 
TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth( + request, + clusterService.state(), + listener, + waitCount, + observedState -> waitForEventsAndExecuteHealth(request, listener, waitCount, endTimeRelativeMillis) + ); + } - @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } + @Override + public void onFailure(Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); } - ); + }.submit(clusterService.getMasterService(), source); } else { final TimeValue taskTimeout = TimeValue.timeValueMillis(Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis())); - clusterService.submitStateUpdateTask( - "cluster_health (wait_for_events [" + request.waitForEvents() + "])", - new ClusterStateUpdateTask(request.waitForEvents(), taskTimeout) { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } + clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask(request.waitForEvents(), taskTimeout) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); - // we must use the state from the applier service, because if the state-not-recovered block is in place then the - // applier service has a different view of the cluster state from the one supplied here - final ClusterState appliedState = clusterService.state(); - assert newState.stateUUID().equals(appliedState.stateUUID()) - : newState.stateUUID() + " vs " + appliedState.stateUUID(); - executeHealth( - request, - appliedState, - listener, - waitCount, - observedState -> waitForEventsAndExecuteHealth(request, listener, waitCount, endTimeRelativeMillis) - ); - } + // we must use the state from the applier service, because if the state-not-recovered block is in place then the + // applier service has a different view of the cluster state from the one supplied here + final ClusterState appliedState = clusterService.state(); + assert newState.stateUUID().equals(appliedState.stateUUID()) : newState.stateUUID() + " vs " + appliedState.stateUUID(); + executeHealth( + request, + appliedState, + listener, + waitCount, + observedState -> waitForEventsAndExecuteHealth(request, listener, waitCount, endTimeRelativeMillis) + ); + } - @Override - public void onNoLongerMaster(String source) { - logger.trace( - "stopped being master while waiting for events with priority [{}]. retrying.", - request.waitForEvents() - ); - // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException - listener.onFailure(new NotMasterException("no longer master. 
source: [" + source + "]")); - } + @Override + public void onNoLongerMaster() { + logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); + // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException + listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); + } - @Override - public void onFailure(String source, Exception e) { - if (e instanceof ProcessClusterEventTimeoutException) { - listener.onResponse(getResponse(request, clusterService.state(), waitCount, TimeoutState.TIMED_OUT)); - } else { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } + @Override + public void onFailure(Exception e) { + if (e instanceof ProcessClusterEventTimeoutException) { + listener.onResponse(getResponse(request, clusterService.state(), waitCount, TimeoutState.TIMED_OUT)); + } else { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); } } - ); + }, ClusterStateTaskExecutor.unbatched()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index d98d6d0b1eb35..68f859467a010 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.HotThreads; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index be88c19d34fc0..e839553021c3e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.info; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; // TODO: This class's interface should match that of NodesInfoRequest public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index d0cb9da9cd420..4f24593cbb55b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.reload; import 
org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.SecureString; /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index 9108cf2c405bf..a32e07855250a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder< NodesStatsRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index df0db7d81e94c..d3596f2a652bf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -81,10 +81,10 @@ public String getDescription() { + reason + "], waitForCompletion[" + waitForCompletion - + "], taskId[" - + getTaskId() - + "], parentTaskId[" - + getParentTaskId() + + "], targetTaskId[" + + getTargetTaskId() + + "], targetParentTaskId[" + + getTargetParentTaskId() + "], nodes" + Arrays.toString(getNodes()) + ", actions" diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index 3daf962df44d4..e41fa85e843f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; import org.elasticsearch.action.support.tasks.TasksRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for the request to cancel tasks running on the specified nodes diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index cebdb6266f253..b7d3ec641cc5b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -42,7 +42,7 @@ public TransportCancelTasksAction(ClusterService clusterService, TransportServic actionFilters, CancelTasksRequest::new, CancelTasksResponse::new, - TaskInfo::new, + TaskInfo::from, // Cancellation is usually lightweight, and runs on the transport thread if the 
task didn't even start yet, but some // implementations of CancellableTask#onCancelled() are nontrivial so we use GENERIC here. TODO could it be SAME? ThreadPool.Names.GENERIC @@ -60,21 +60,21 @@ protected CancelTasksResponse newResponse( } protected void processTasks(CancelTasksRequest request, Consumer operation) { - if (request.getTaskId().isSet()) { + if (request.getTargetTaskId().isSet()) { // we are only checking one task, we can optimize it - CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId()); + CancellableTask task = taskManager.getCancellableTask(request.getTargetTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept(task); } else { - throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation"); + throw new IllegalArgumentException("task [" + request.getTargetTaskId() + "] doesn't support this operation"); } } else { - if (taskManager.getTask(request.getTaskId().getId()) != null) { + if (taskManager.getTask(request.getTargetTaskId().getId()) != null) { // The task exists, but doesn't support cancellation - throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation"); + throw new IllegalArgumentException("task [" + request.getTargetTaskId() + "] doesn't support cancellation"); } else { - throw new ResourceNotFoundException("task [{}] is not found", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] is not found", request.getTargetTaskId()); } } } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java index f1fa901fc9885..01f7a201a8c5d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.get; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 0662f5d738c65..32ca0a59a0b1b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.OriginSettingClient; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java index c8dd737592ef2..87045ad7ef8b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; import org.elasticsearch.action.support.tasks.TasksRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for the request to retrieve the list of tasks running on the specified nodes diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 3ed890a56dbc8..99c9054daad0c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -59,7 +59,7 @@ public ListTasksResponse( public ListTasksResponse(StreamInput in) throws IOException { super(in); - tasks = Collections.unmodifiableList(in.readList(TaskInfo::new)); + tasks = Collections.unmodifiableList(in.readList(TaskInfo::from)); } @Override @@ -102,7 +102,7 @@ protected static ConstructingObjectParser setupParser( */ public Map> getPerNodeTasks() { if (perNodeTasks == null) { - perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.getTaskId().getNodeId())); + perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.taskId().getNodeId())); } return perNodeTasks; } @@ -122,12 +122,12 @@ private void buildTaskGroups() { List topLevelTasks = new ArrayList<>(); // First populate all tasks for (TaskInfo taskInfo : this.tasks) { - taskGroups.put(taskInfo.getTaskId(), TaskGroup.builder(taskInfo)); + taskGroups.put(taskInfo.taskId(), TaskGroup.builder(taskInfo)); } // Now go through all task group builders and add children to their parents for (TaskGroup.Builder taskGroup : taskGroups.values()) { - TaskId parentTaskId = taskGroup.getTaskInfo().getParentTaskId(); + TaskId parentTaskId = taskGroup.getTaskInfo().parentTaskId(); if (parentTaskId.isSet()) { TaskGroup.Builder parentTask = taskGroups.get(parentTaskId); if (parentTask != null) { @@ -185,7 +185,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p } builder.startObject(TASKS); for (TaskInfo task : entry.getValue()) { - builder.startObject(task.getTaskId().toString()); + builder.startObject(task.taskId().toString()); task.toXContent(builder, params); builder.endObject(); } @@ -203,7 +203,7 @@ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Param toXContentCommon(builder, params); builder.startObject(TASKS); for (TaskGroup group : getTaskGroups()) { - builder.field(group.getTaskInfo().getTaskId().toString()); + builder.field(group.taskInfo().taskId().toString()); group.toXContent(builder, params); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java index a02d6ed6ea998..5a33342fbcee4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -20,11 +20,7 @@ /** * Information about a currently running task and all its subtasks. */ -public class TaskGroup implements ToXContentObject { - - private final TaskInfo task; - - private final List childTasks; +public record TaskGroup(TaskInfo task, List childTasks) implements ToXContentObject { public TaskGroup(TaskInfo task, List childTasks) { this.task = task; @@ -57,14 +53,10 @@ public TaskGroup build() { } } - public TaskInfo getTaskInfo() { + public TaskInfo taskInfo() { return task; } - public List getChildTasks() { - return childTasks; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index f90068fc23f29..04071992dbdf6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -45,7 +45,7 @@ public TransportListTasksAction(ClusterService clusterService, TransportService actionFilters, ListTasksRequest::new, ListTasksResponse::new, - TaskInfo::new, + TaskInfo::from, ThreadPool.Names.MANAGEMENT ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java index fd6fb22c83915..361d4509ed95b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< NodesUsageRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java index 31dca81ce2989..83064a6348d4c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class CleanupRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder< CleanupRepositoryRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 9cb6e03cddd47..4d76362bb8d0f 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RepositoryCleanupInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; @@ -112,15 +113,16 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { logger.debug("Removed repository cleanup task [{}] from cluster state", repositoryCleanupInProgress); } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { logger.warn("Failed to remove repository cleanup task [{}] from cluster state", repositoryCleanupInProgress); } - } + }, + ClusterStateTaskExecutor.unbatched() ); } }); @@ -217,12 +219,12 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { after(e, null); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { startedCleanup = true; logger.debug("Initialized repository cleanup in cluster state for [{}][{}]", repositoryName, repositoryStateId); threadPool.executor(ThreadPool.Names.SNAPSHOT) @@ -266,7 +268,7 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { if (failure != null) { e.addSuppressed(failure); } @@ -278,7 +280,7 @@ public void onFailure(String source, Exception e) { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { if (failure == null) { logger.info( "Done with repository cleanup on [{}][{}] with result [{}]", @@ -299,10 +301,12 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS listener.onFailure(failure); } } - } + }, + ClusterStateTaskExecutor.unbatched() ); } - } + }, + ClusterStateTaskExecutor.unbatched() ); }, listener::onFailure); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index d2c611172cab8..498350b766448 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import 
org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for unregister repository request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 892b9d0d0ba94..9ef6b5ca8a3d5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.repositories.get; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 596bb2d2c8ced..0ef45712e5051 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 5e6840e8c0339..7c40030f14c00 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.repositories.verify; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for verify repository request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index daed611502ee2..f8e5282df6fcd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.reroute; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; /** diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 1d5d9333698c5..979ed9e54c2c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -21,6 +20,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -88,8 +88,7 @@ protected void masterOperation( ) { Map<String, List<AbstractAllocateAllocationCommand>> stalePrimaryAllocations = new HashMap<>(); for (AllocationCommand command : request.getCommands().commands()) { - if (command instanceof AllocateStalePrimaryAllocationCommand) { - final AllocateStalePrimaryAllocationCommand cmd = (AllocateStalePrimaryAllocationCommand) command; + if (command instanceof final AllocateStalePrimaryAllocationCommand cmd) { stalePrimaryAllocations.computeIfAbsent(cmd.index(), k -> new ArrayList<>()).add(cmd); } } @@ -159,15 +158,18 @@ private void verifyThenSubmitUpdate( ); } + private static final String TASK_SOURCE = "cluster_reroute (api)"; + private void submitStateUpdate(final ClusterRerouteRequest request, final ActionListener<ClusterRerouteResponse> listener) { clusterService.submitStateUpdateTask( - "cluster_reroute (api)", + TASK_SOURCE, new ClusterRerouteResponseAckedClusterStateUpdateTask(logger, allocationService, request, listener.map(response -> { if (request.dryRun() == false) { response.getExplanations().getYesDecisionMessages().forEach(logger::info); } return response; - })) + })), + ClusterStateTaskExecutor.unbatched() ); } @@ -204,9 +206,9 @@ public void onAckTimeout() { } @Override - public void onFailure(String source, Exception e) { - logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); - super.onFailure(source, e); + public void onFailure(Exception e) { + logger.debug("failed to perform [" + TASK_SOURCE + "]", e); + super.onFailure(e); } @Override @@ -217,12 +219,12 @@ public ClusterState execute(ClusterState currentState) { request.explain(), request.isRetryFailed() ); - clusterStateToSend = commandsResult.getClusterState(); + clusterStateToSend = commandsResult.clusterState(); explanations = commandsResult.explanations(); if (request.dryRun()) { return currentState; } - return commandsResult.getClusterState(); + return commandsResult.clusterState(); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 7ace2fa440d45..42409f6ba810f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++
b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.settings; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 602b4a0e15fe1..30f0fd9b07067 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -129,6 +130,9 @@ private boolean checkClearedBlockAndArchivedSettings(final Settings settings, fi return true; } + private static final String UPDATE_TASK_SOURCE = "cluster_update_settings"; + private static final String REROUTE_TASK_SOURCE = "reroute_after_cluster_update_settings"; + @Override protected void masterOperation( Task task, @@ -137,125 +141,119 @@ protected void masterOperation( final ActionListener<ClusterUpdateSettingsResponse> listener ) { final SettingsUpdater updater = new SettingsUpdater(clusterSettings); - clusterService.submitStateUpdateTask( - "cluster_update_settings", - new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { + clusterService.submitStateUpdateTask(UPDATE_TASK_SOURCE, new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { + + private volatile boolean changed = false; - private volatile boolean changed = false; + @Override + protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { + return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); + } - @Override - protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); + @Override + public void onAllNodesAcked(@Nullable Exception e) { + if (changed) { + reroute(true); + } else { + super.onAllNodesAcked(e); } + } - @Override - public void onAllNodesAcked(@Nullable Exception e) { - if (changed) { - reroute(true); - } else { - super.onAllNodesAcked(e); - } + @Override + public void onAckTimeout() { + if (changed) { + reroute(false); + } else { + super.onAckTimeout(); } + } - @Override - public void onAckTimeout() { - if (changed) { - reroute(false); - } else { - super.onAckTimeout(); - } + private void reroute(final boolean updateSettingsAcked) { + // We're about to send a second update task, so we need to check if we're still the elected master + // For example the minimum_master_node could have been breached and we're no longer elected master, + // so we should *not* execute
the reroute. + if (clusterService.state().nodes().isLocalNodeElectedMaster() == false) { + logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); + listener.onResponse( + new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()) + ); + return; } - private void reroute(final boolean updateSettingsAcked) { - // We're about to send a second update task, so we need to check if we're still the elected master - // For example the minimum_master_node could have been breached and we're no longer elected master, - // so we should *not* execute the reroute. - if (clusterService.state().nodes().isLocalNodeElectedMaster() == false) { - logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); - listener.onResponse( - new ClusterUpdateSettingsResponse( - updateSettingsAcked, - updater.getTransientUpdates(), - updater.getPersistentUpdate() - ) - ); - return; - } + // The reason the reroute needs to be sent as a separate update task is that all the *cluster* settings are encapsulated + // in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible + // to the components until the ClusterStateListener instances have been invoked, but are visible after + // the first update task has been completed. + clusterService.submitStateUpdateTask( + REROUTE_TASK_SOURCE, + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { - // The reason the reroute needs to be send as separate update task, is that all the *cluster* settings are encapsulate - // in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible - // to the components until the ClusterStateListener instances have been invoked, but are visible after - // the first update task has been completed.
- clusterService.submitStateUpdateTask( - "reroute_after_cluster_update_settings", - new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + // we wait for the reroute ack only if the update settings was acknowledged + return updateSettingsAcked; + } - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - // we wait for the reroute ack only if the update settings was acknowledged - return updateSettingsAcked; - } + @Override + // we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the + // update settings was acknowledged + protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { + return new ClusterUpdateSettingsResponse( + updateSettingsAcked && acknowledged, + updater.getTransientUpdates(), + updater.getPersistentUpdate() + ); + } - @Override - // we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the - // update settings was acknowledged - protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse( - updateSettingsAcked && acknowledged, + @Override + public void onNoLongerMaster() { + logger.debug( + "failed to perform reroute after cluster settings were updated - current node is no longer a master" + ); + listener.onResponse( + new ClusterUpdateSettingsResponse( + updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate() - ); - } - - @Override - public void onNoLongerMaster(String source) { - logger.debug( - "failed to preform reroute after cluster settings were updated - current node is no longer a master" - ); - listener.onResponse( - new ClusterUpdateSettingsResponse( - updateSettingsAcked, - updater.getTransientUpdates(), - updater.getPersistentUpdate() - ) - ); - } + ) + ); + } - @Override - public void onFailure(String source, Exception e) { - // if the reroute fails we only log - logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); - listener.onFailure(new ElasticsearchException("reroute after update settings failed", e)); - } + @Override + public void onFailure(Exception e) { + // if the reroute fails we only log + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", REROUTE_TASK_SOURCE), e); + listener.onFailure(new ElasticsearchException("reroute after update settings failed", e)); + } - @Override - public ClusterState execute(final ClusterState currentState) { - // now, reroute in case things that require it changed (e.g.
number of replicas) + return allocationService.reroute(currentState, "reroute after cluster update settings"); } - ); - } + }, + ClusterStateTaskExecutor.unbatched() + ); + } - @Override - public void onFailure(String source, Exception e) { - logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); - super.onFailure(source, e); - } + @Override + public void onFailure(Exception e) { + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", UPDATE_TASK_SOURCE), e); + super.onFailure(e); + } - @Override - public ClusterState execute(final ClusterState currentState) { - final ClusterState clusterState = updater.updateSettings( - currentState, - clusterSettings.upgradeSettings(request.transientSettings()), - clusterSettings.upgradeSettings(request.persistentSettings()), - logger - ); - changed = clusterState != currentState; - return clusterState; - } + @Override + public ClusterState execute(final ClusterState currentState) { + final ClusterState clusterState = updater.updateSettings( + currentState, + clusterSettings.upgradeSettings(request.transientSettings()), + clusterSettings.upgradeSettings(request.persistentSettings()), + logger + ); + changed = clusterState != currentState; + return clusterState; } - ); + }, ClusterStateTaskExecutor.unbatched()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index e4a081f994ddf..53e0103d5360d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder< ClusterSearchShardsRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 2da99fd32d88a..1275a8cbfdfaa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; public class CloneSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index b663b2c11dbf7..25d3c53521345 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Nullable; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index ceb6f1121150d..3ceab6badcaa8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Delete snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java index 89208c60f9104..4208b2683eb18 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 63094ac8febbb..c951fe3267ce4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -412,24 +412,16 @@ public String toString() { } public static SortBy of(String value) { - switch (value) { - case "start_time": - return START_TIME; - case "name": - return NAME; - case "duration": - return DURATION; - case "index_count": - return INDICES; - case "shard_count": - return SHARDS; - case "failed_shard_count": - return FAILED_SHARDS; - case "repository": - return REPOSITORY; - default: - throw new IllegalArgumentException("unknown sort order [" + value + "]"); - } + return switch (value) { + case "start_time" -> START_TIME; + case "name" -> NAME; + case "duration" -> DURATION; + case "index_count" -> INDICES; + case "shard_count" -> SHARDS; 
+ case "failed_shard_count" -> FAILED_SHARDS; + case "repository" -> REPOSITORY; + default -> throw new IllegalArgumentException("unknown sort order [" + value + "]"); + }; } } @@ -458,32 +450,15 @@ public static After from(@Nullable SnapshotInfo snapshotInfo, SortBy sortBy) { if (snapshotInfo == null) { return null; } - final String afterValue; - switch (sortBy) { - case START_TIME: - afterValue = String.valueOf(snapshotInfo.startTime()); - break; - case NAME: - afterValue = snapshotInfo.snapshotId().getName(); - break; - case DURATION: - afterValue = String.valueOf(snapshotInfo.endTime() - snapshotInfo.startTime()); - break; - case INDICES: - afterValue = String.valueOf(snapshotInfo.indices().size()); - break; - case SHARDS: - afterValue = String.valueOf(snapshotInfo.totalShards()); - break; - case FAILED_SHARDS: - afterValue = String.valueOf(snapshotInfo.failedShards()); - break; - case REPOSITORY: - afterValue = snapshotInfo.repository(); - break; - default: - throw new AssertionError("unknown sort column [" + sortBy + "]"); - } + final String afterValue = switch (sortBy) { + case START_TIME -> String.valueOf(snapshotInfo.startTime()); + case NAME -> snapshotInfo.snapshotId().getName(); + case DURATION -> String.valueOf(snapshotInfo.endTime() - snapshotInfo.startTime()); + case INDICES -> String.valueOf(snapshotInfo.indices().size()); + case SHARDS -> String.valueOf(snapshotInfo.totalShards()); + case FAILED_SHARDS -> String.valueOf(snapshotInfo.failedShards()); + case REPOSITORY -> snapshotInfo.repository(); + }; return new After(afterValue, snapshotInfo.repository(), snapshotInfo.snapshotId().getName()); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 7eda87afbc34e..933506c150ed0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.search.sort.SortOrder; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index ea4c0fb60c227..56111e882f2fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -577,32 +577,15 @@ private static SnapshotsInRepo sortSnapshots( int size, SortOrder order ) { - final Comparator comparator; - switch (sortBy) { - case START_TIME: - comparator = BY_START_TIME; - break; - case NAME: - comparator = BY_NAME; - break; - case DURATION: - comparator = BY_DURATION; - break; - case INDICES: - comparator = BY_INDICES_COUNT; - break; - case SHARDS: - comparator = BY_SHARDS_COUNT; - break; - case FAILED_SHARDS: - comparator = BY_FAILED_SHARDS_COUNT; - break; - case REPOSITORY: 
- comparator = BY_REPOSITORY; - break; - default: - throw new AssertionError("unexpected sort column [" + sortBy + "]"); - } + final Comparator<SnapshotInfo> comparator = switch (sortBy) { + case START_TIME -> BY_START_TIME; + case NAME -> BY_NAME; + case DURATION -> BY_DURATION; + case INDICES -> BY_INDICES_COUNT; + case SHARDS -> BY_SHARDS_COUNT; + case FAILED_SHARDS -> BY_FAILED_SHARDS_COUNT; + case REPOSITORY -> BY_REPOSITORY; + }; Stream<SnapshotInfo> infos = snapshotInfos.stream(); @@ -632,31 +615,31 @@ private static Predicate<SnapshotInfo> buildAfterPredicate( final String snapshotName = after.snapshotName(); final String repoName = after.repoName(); final String value = after.value(); - switch (sortBy) { - case START_TIME: - return filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(value), snapshotName, repoName, order); - case NAME: + return switch (sortBy) { + case START_TIME -> filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(value), snapshotName, repoName, order); + case NAME -> // TODO: cover via pre-flight predicate - return order == SortOrder.ASC + order == SortOrder.ASC ? (info -> compareName(snapshotName, repoName, info) < 0) : (info -> compareName(snapshotName, repoName, info) > 0); - case DURATION: - return filterByLongOffset(info -> info.endTime() - info.startTime(), Long.parseLong(value), snapshotName, repoName, order); - case INDICES: + case DURATION -> filterByLongOffset( + info -> info.endTime() - info.startTime(), + Long.parseLong(value), + snapshotName, + repoName, + order + ); + case INDICES -> // TODO: cover via pre-flight predicate - return filterByLongOffset(info -> info.indices().size(), Integer.parseInt(value), snapshotName, repoName, order); - case SHARDS: - return filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(value), snapshotName, repoName, order); - case FAILED_SHARDS: - return filterByLongOffset(SnapshotInfo::failedShards, Integer.parseInt(value), snapshotName, repoName, order); - case REPOSITORY: + filterByLongOffset(info -> info.indices().size(), Integer.parseInt(value), snapshotName, repoName, order); + case SHARDS -> filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(value), snapshotName, repoName, order); + case FAILED_SHARDS -> filterByLongOffset(SnapshotInfo::failedShards, Integer.parseInt(value), snapshotName, repoName, order); + case REPOSITORY -> // TODO: cover via pre-flight predicate - return order == SortOrder.ASC + order == SortOrder.ASC ?
(info -> compareRepositoryName(snapshotName, repoName, info) < 0) : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); - default: - throw new AssertionError("unexpected sort column [" + sortBy + "]"); - } + }; } private static Predicate<SnapshotInfo> filterByLongOffset( @@ -871,18 +854,7 @@ private static Predicate<SnapshotInfo> filterByLongOffset(ToLongFunction<SnapshotInfo - private static final class SnapshotsInRepo { - - private final List<SnapshotInfo> snapshotInfos; - - private final int totalCount; + private record SnapshotsInRepo(List<SnapshotInfo> snapshotInfos, int totalCount, int remaining) { - private final int remaining; - - SnapshotsInRepo(List<SnapshotInfo> snapshotInfos, int totalCount, int remaining) { - this.snapshotInfos = snapshotInfos; - this.totalCount = totalCount; - this.remaining = remaining; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index f8e47bfc9ba50..04e67a86e4a7a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index 14811638a2cb9..6065b44f4e7ad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -65,19 +65,13 @@ public boolean completed() { * @return state */ public static SnapshotIndexShardStage fromValue(byte value) { - switch (value) { - case 0: - return INIT; - case 1: - return STARTED; - case 2: - return FINALIZE; - case 3: - return DONE; - case 4: - return FAILURE; - default: - throw new IllegalArgumentException("No snapshot shard stage for value [" + value + "]"); - } + return switch (value) { + case 0 -> INIT; + case 1 -> STARTED; + case 2 -> FINALIZE; + case 3 -> DONE; + case 4 -> FAILURE; + default -> throw new IllegalArgumentException("No snapshot shard stage for value [" + value + "]"); + }; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 14a9ad4c28d9d..c19031c201aaa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -60,25 +60,14 @@ public SnapshotIndexShardStatus(StreamInput in) throws IOException { SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus.Copy indexShardStatus, String nodeId) { super(shardId); - switch (indexShardStatus.getStage()) { - case INIT: - stage = SnapshotIndexShardStage.INIT; - break; - case STARTED: - stage =
SnapshotIndexShardStage.STARTED; - break; - case FINALIZE: - stage = SnapshotIndexShardStage.FINALIZE; - break; - case DONE: - stage = SnapshotIndexShardStage.DONE; - break; - case FAILURE: - stage = SnapshotIndexShardStage.FAILURE; - break; - default: - throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage()); - } + stage = switch (indexShardStatus.getStage()) { + case INIT -> SnapshotIndexShardStage.INIT; + case STARTED -> SnapshotIndexShardStage.STARTED; + case FINALIZE -> SnapshotIndexShardStage.FINALIZE; + case DONE -> SnapshotIndexShardStage.DONE; + case FAILURE -> SnapshotIndexShardStage.FAILURE; + default -> throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage()); + }; this.stats = new SnapshotStats( indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index f9648579e2948..5d66baf0216ad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -133,7 +134,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (shardStatuses == null || shardStatuses.isEmpty()) { indexShards = emptyMap(); } else { - indexShards = new HashMap<>(shardStatuses.size()); + indexShards = Maps.newMapWithExpectedSize(shardStatuses.size()); for (SnapshotIndexShardStatus shardStatus : shardStatuses) { indexShards.put(shardStatus.getShardId().getId(), shardStatus); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index e66bf0af476f1..5bbc5368505db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -36,23 +36,12 @@ public class SnapshotShardsStats implements ToXContentObject { for (SnapshotIndexShardStatus shard : shards) { totalShards++; switch (shard.getStage()) { - case INIT: - initializingShards++; - break; - case STARTED: - startedShards++; - break; - case FINALIZE: - finalizingShards++; - break; - case DONE: - doneShards++; - break; - case FAILURE: - failedShards++; - break; - default: - throw new IllegalArgumentException("Unknown stage type " + shard.getStage()); + case INIT -> initializingShards++; + case STARTED -> startedShards++; + case FINALIZE -> finalizingShards++; + case DONE -> doneShards++; + case FAILURE -> failedShards++; + default -> throw new IllegalArgumentException("Unknown stage type " + shard.getStage()); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index d365dd08555c5..a515843dfb776 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -240,7 +241,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws indicesStatus = emptyMap(); shards = emptyList(); } else { - indicesStatus = new HashMap<>(indices.size()); + indicesStatus = Maps.newMapWithExpectedSize(indices.size()); shards = new ArrayList<>(); for (SnapshotIndexStatus index : indices) { indicesStatus.put(index.getIndex(), index); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index bccf7870af62b..7f64f1bd6b76d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 90672c003c112..5c079d430458e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -8,15 +8,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -184,20 +182,21 @@ private void buildResponse( for (SnapshotsInProgress.Entry entry : currentSnapshotEntries) { currentSnapshotNames.add(entry.snapshot().getSnapshotId().getName()); List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>(); - for (ObjectObjectCursor<RepositoryShardId, SnapshotsInProgress.ShardSnapshotStatus> shardEntry : entry - .shardsByRepoShardId()) { - SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.value; + for (Map.Entry<RepositoryShardId, SnapshotsInProgress.ShardSnapshotStatus> shardEntry : entry.shardsByRepoShardId() + .entrySet()) { + SnapshotsInProgress.ShardSnapshotStatus status =
shardEntry.getValue(); if (status.nodeId() != null) { // We should have information about this shard from the shard: TransportNodesSnapshotsStatus.NodeSnapshotStatus nodeStatus = nodeSnapshotStatusMap.get(status.nodeId()); if (nodeStatus != null) { Map<ShardId, SnapshotIndexShardStatus> shardStatues = nodeStatus.status().get(entry.snapshot()); if (shardStatues != null) { - final ShardId sid = entry.shardId(shardEntry.key); + final ShardId sid = entry.shardId(shardEntry.getKey()); SnapshotIndexShardStatus shardStatus = shardStatues.get(sid); if (shardStatus != null) { // We have full information about this shard - if (shardStatus.getStage() == SnapshotIndexShardStage.DONE && shardEntry.value.state() != SUCCESS) { + if (shardStatus.getStage() == SnapshotIndexShardStage.DONE + && shardEntry.getValue().state() != SUCCESS) { // Unlikely edge case: // Data node has finished snapshotting the shard but the cluster state has not yet been updated // to reflect this. We adjust the status to show up as snapshot metadata being written because @@ -221,29 +220,16 @@ private void buildResponse( // This can happen if nodes drop out of the cluster completely or restart during the snapshot. // We rebuild the information they would have provided from their in memory state from the cluster // state and the repository contents in the below logic - final SnapshotIndexShardStage stage; - switch (shardEntry.value.state()) { - case FAILED: - case ABORTED: - case MISSING: - stage = SnapshotIndexShardStage.FAILURE; - break; - case INIT: - case WAITING: - case QUEUED: - stage = SnapshotIndexShardStage.STARTED; - break; - case SUCCESS: - stage = SnapshotIndexShardStage.DONE; - break; - default: - throw new IllegalArgumentException("Unknown snapshot state " + shardEntry.value.state()); - } + final SnapshotIndexShardStage stage = switch (shardEntry.getValue().state()) { + case FAILED, ABORTED, MISSING -> SnapshotIndexShardStage.FAILURE; + case INIT, WAITING, QUEUED -> SnapshotIndexShardStage.STARTED; + case SUCCESS -> SnapshotIndexShardStage.DONE; + }; final SnapshotIndexShardStatus shardStatus; if (stage == SnapshotIndexShardStage.DONE) { // Shard snapshot completed successfully so we should be able to load the exact statistics for this // shard from the repository already.
- final ShardId shardId = entry.shardId(shardEntry.key); + final ShardId shardId = entry.shardId(shardEntry.getKey()); shardStatus = new SnapshotIndexShardStatus( shardId, repositoriesService.repository(entry.repository()) @@ -255,7 +241,7 @@ private void buildResponse( .asCopy() ); } else { - shardStatus = new SnapshotIndexShardStatus(entry.shardId(shardEntry.key), stage); + shardStatus = new SnapshotIndexShardStatus(entry.shardId(shardEntry.getKey()), stage); } shardStatusBuilder.add(shardStatus); } @@ -343,20 +329,14 @@ private void loadRepositoryData( IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); } - final SnapshotsInProgress.State state; - switch (snapshotInfo.state()) { - case FAILED: - state = SnapshotsInProgress.State.FAILED; - break; - case SUCCESS: - case PARTIAL: + final SnapshotsInProgress.State state = switch (snapshotInfo.state()) { + case FAILED -> SnapshotsInProgress.State.FAILED; + case SUCCESS, PARTIAL -> // Translating both PARTIAL and SUCCESS to SUCCESS for now // TODO: add the differentiation on the metadata level in the next major release - state = SnapshotsInProgress.State.SUCCESS; - break; - default: - throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state()); - } + SnapshotsInProgress.State.SUCCESS; + default -> throw new IllegalArgumentException("Unexpected snapshot state " + snapshotInfo.state()); + }; final long startTime = snapshotInfo.startTime(); final long endTime = snapshotInfo.endTime(); assert endTime >= startTime || (endTime == 0L && snapshotInfo.state().completed() == false) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 703701c1ac567..f24d013794a42 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.TimeValue; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index f72ebce8a2d6a..7e0352e742929 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.admin.cluster.state; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; @@ -37,6 +35,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Map; import java.util.function.Predicate; public class TransportClusterStateAction extends TransportMasterNodeReadAction<ClusterStateRequest, ClusterStateResponse> { @@ -194,18 +193,18 @@ private ClusterStateResponse
buildResponse(final ClusterStateRequest request, fi } // filter out metadata that shouldn't be returned by the API - for (ObjectObjectCursor<String, Metadata.Custom> custom : currentState.metadata().customs()) { - if (custom.value.context().contains(Metadata.XContentContext.API) == false) { - mdBuilder.removeCustom(custom.key); + for (Map.Entry<String, Metadata.Custom> custom : currentState.metadata().customs().entrySet()) { + if (custom.getValue().context().contains(Metadata.XContentContext.API) == false) { + mdBuilder.removeCustom(custom.getKey()); } } } builder.metadata(mdBuilder); if (request.customs()) { - for (ObjectObjectCursor<String, ClusterState.Custom> custom : currentState.customs()) { - if (custom.value.isPrivate() == false) { - builder.putCustom(custom.key, custom.value); + for (Map.Entry<String, ClusterState.Custom> custom : currentState.customs().entrySet()) { + if (custom.getValue().isPrivate() == false) { + builder.putCustom(custom.getKey(), custom.getValue()); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index 68e012abe90ce..deca42423f755 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -8,9 +8,6 @@ package org.elasticsearch.action.admin.cluster.stats; -import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.SegmentsStats; @@ -22,7 +19,9 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; public class ClusterStatsIndices implements ToXContentFragment { @@ -44,7 +43,7 @@ public ClusterStatsIndices( AnalysisStats analysisStats, VersionStats versionStats ) { - ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>(); + Map<String, ShardStats> countsPerIndex = new HashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); @@ -79,8 +78,8 @@ public ClusterStatsIndices( shards = new ShardStats(); indexCount = countsPerIndex.size(); - for (ObjectObjectCursor<String, ShardStats> indexCountsCursor : countsPerIndex) { - shards.addIndexShardCount(indexCountsCursor.value); + for (Map.Entry<String, ShardStats> indexCountsCursor : countsPerIndex.entrySet()) { + shards.addIndexShardCount(indexCountsCursor.getValue()); } this.mappings = mappingStats; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 5152568565be1..a392f94e71b19 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -21,9 +21,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.index.stats.IndexingPressureStats; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsInfo; @@
-59,6 +61,7 @@ public class ClusterStatsNodes implements ToXContentFragment { private final DiscoveryTypes discoveryTypes; private final PackagingTypes packagingTypes; private final IngestStats ingestStats; + private final IndexPressureStats indexPressureStats; ClusterStatsNodes(List<ClusterStatsNodeResponse> nodeResponses) { this.versions = new HashSet<>(); @@ -92,6 +95,7 @@ public class ClusterStatsNodes implements ToXContentFragment { this.discoveryTypes = new DiscoveryTypes(nodeInfos); this.packagingTypes = new PackagingTypes(nodeInfos); this.ingestStats = new IngestStats(nodeStats); + this.indexPressureStats = new IndexPressureStats(nodeStats); } public Counts getCounts() { @@ -176,6 +180,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws ingestStats.toXContent(builder, params); + indexPressureStats.toXContent(builder, params); + return builder; } @@ -187,7 +193,7 @@ public static class Counts implements ToXContentFragment { private Counts(final List<NodeInfo> nodeInfos) { // TODO: do we need to report zeros? - final Map<String, Integer> roles = new HashMap<>(DiscoveryNodeRole.roles().size() + 1); + final Map<String, Integer> roles = Maps.newMapWithExpectedSize(DiscoveryNodeRole.roles().size() + 1); roles.put(COORDINATING_ONLY, 0); for (final DiscoveryNodeRole role : DiscoveryNodeRole.roles()) { roles.put(role.roleName(), 0); @@ -769,4 +775,81 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa } + static class IndexPressureStats implements ToXContentFragment { + + private final IndexingPressureStats indexingPressureStats; + + IndexPressureStats(final List<NodeStats> nodeStats) { + long totalCombinedCoordinatingAndPrimaryBytes = 0; + long totalCoordinatingBytes = 0; + long totalPrimaryBytes = 0; + long totalReplicaBytes = 0; + + long currentCombinedCoordinatingAndPrimaryBytes = 0; + long currentCoordinatingBytes = 0; + long currentPrimaryBytes = 0; + long currentReplicaBytes = 0; + long coordinatingRejections = 0; + long primaryRejections = 0; + long replicaRejections = 0; + long memoryLimit = 0; + + long totalCoordinatingOps = 0; + long totalPrimaryOps = 0; + long totalReplicaOps = 0; + long currentCoordinatingOps = 0; + long currentPrimaryOps = 0; + long currentReplicaOps = 0; + for (NodeStats nodeStat : nodeStats) { + IndexingPressureStats nodeStatIndexingPressureStats = nodeStat.getIndexingPressureStats(); + if (nodeStatIndexingPressureStats != null) { + totalCombinedCoordinatingAndPrimaryBytes += nodeStatIndexingPressureStats.getTotalCombinedCoordinatingAndPrimaryBytes(); + totalCoordinatingBytes += nodeStatIndexingPressureStats.getTotalCoordinatingBytes(); + totalPrimaryBytes += nodeStatIndexingPressureStats.getTotalPrimaryBytes(); + totalReplicaBytes += nodeStatIndexingPressureStats.getTotalReplicaBytes(); + currentCombinedCoordinatingAndPrimaryBytes += nodeStatIndexingPressureStats + .getCurrentCombinedCoordinatingAndPrimaryBytes(); + currentCoordinatingBytes += nodeStatIndexingPressureStats.getCurrentCoordinatingBytes(); + currentPrimaryBytes += nodeStatIndexingPressureStats.getCurrentPrimaryBytes(); + currentReplicaBytes += nodeStatIndexingPressureStats.getCurrentReplicaBytes(); + coordinatingRejections += nodeStatIndexingPressureStats.getCoordinatingRejections(); + primaryRejections += nodeStatIndexingPressureStats.getPrimaryRejections(); + replicaRejections += nodeStatIndexingPressureStats.getReplicaRejections(); + memoryLimit += nodeStatIndexingPressureStats.getMemoryLimit(); + totalCoordinatingOps += nodeStatIndexingPressureStats.getTotalCoordinatingOps(); + totalPrimaryOps += nodeStatIndexingPressureStats.getTotalPrimaryOps(); + totalReplicaOps +=
nodeStatIndexingPressureStats.getTotalReplicaOps(); + currentCoordinatingOps += nodeStatIndexingPressureStats.getCurrentCoordinatingOps(); + currentPrimaryOps += nodeStatIndexingPressureStats.getCurrentPrimaryOps(); + currentReplicaOps += nodeStatIndexingPressureStats.getCurrentReplicaOps(); + } + } + indexingPressureStats = new IndexingPressureStats( + totalCombinedCoordinatingAndPrimaryBytes, + totalCoordinatingBytes, + totalPrimaryBytes, + totalReplicaBytes, + currentCombinedCoordinatingAndPrimaryBytes, + currentCoordinatingBytes, + currentPrimaryBytes, + currentReplicaBytes, + coordinatingRejections, + primaryRejections, + replicaRejections, + memoryLimit, + totalCoordinatingOps, + totalPrimaryOps, + totalReplicaOps, + currentCoordinatingOps, + currentPrimaryOps, + currentReplicaOps + ); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return indexingPressureStats.toXContent(builder, params); + } + + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java index 3a58feb0c22a6..0213c3d2861ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder< ClusterStatsRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index 4317a59318414..0c4276833cb3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -76,8 +76,7 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { stats.indexCount++; } Object scriptObject = fieldMapping.get("script"); - if (scriptObject instanceof Map) { - Map<?, ?> script = (Map<?, ?>) scriptObject; + if (scriptObject instanceof Map<?, ?> script) { Object sourceObject = script.get("source"); stats.scriptCount++; updateScriptParams(sourceObject, stats.fieldScriptStats); @@ -106,8 +105,7 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { Object scriptObject = fieldMapping.get("script"); if (scriptObject == null) { stats.scriptLessCount++; - } else if (scriptObject instanceof Map) { - Map<?, ?> script = (Map<?, ?>) scriptObject; + } else if (scriptObject instanceof Map<?, ?> script) { Object sourceObject = script.get("source"); updateScriptParams(sourceObject, stats.fieldScriptStats); Object langObject = script.get("lang"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java index bcdb13f65b920..1e054d95b753e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java @@ -8,8 +8,6 @@ package
org.elasticsearch.action.admin.cluster.stats; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -68,8 +66,8 @@ public static VersionStats of(Metadata metadata, List } // Loop through all indices in the metadata, building the counts as needed - for (ObjectObjectCursor<String, IndexMetadata> cursor : metadata.indices()) { - IndexMetadata indexMetadata = cursor.value; + for (Map.Entry<String, IndexMetadata> cursor : metadata.indices().entrySet()) { + IndexMetadata indexMetadata = cursor.getValue(); // Increment version-specific index counts indexCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { if (i == null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index ffaaece5bfdb3..e5def5f928a66 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< DeleteStoredScriptRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java index b2cb2b49d699f..e10f703558e9f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptContextInfo; @@ -22,7 +23,6 @@ import java.io.IOException; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -57,7 +57,7 @@ public class GetScriptContextResponse extends ActionResponse implements StatusTo GetScriptContextResponse(StreamInput in) throws IOException { super(in); int size = in.readInt(); - HashMap<String, ScriptContextInfo> contexts = new HashMap<>(size); + Map<String, ScriptContextInfo> contexts = Maps.newMapWithExpectedSize(size); for (int i = 0; i < size; i++) { ScriptContextInfo info = new ScriptContextInfo(in); contexts.put(info.name, info); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java index bfbdc95334175..29343324b268c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java +++
b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class GetStoredScriptRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetStoredScriptRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index 9c64c3f8dfbf4..a4a5543a90ce8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java index 3a9630784888a..10d84e9f9e603 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.tasks; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder< PendingClusterTasksRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java index 9dacc8ab87739..bb202dc6c3772 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java @@ -28,6 +28,7 @@ import java.io.InputStream; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * Represents an alias, to be associated with an index @@ -231,6 +232,18 @@ public static Alias fromXContent(XContentParser parser) throws IOException { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + // check if there are any unknown fields + Set knownFieldNames = Set.of( + FILTER.getPreferredName(), + ROUTING.getPreferredName(), + INDEX_ROUTING.getPreferredName(), + SEARCH_ROUTING.getPreferredName(), + IS_WRITE_INDEX.getPreferredName(), + IS_HIDDEN.getPreferredName() + ); + if (knownFieldNames.contains(currentFieldName) == false) { + throw new IllegalArgumentException("Unknown field [" + currentFieldName + "] in alias [" + alias.name + "]"); + } } else 
if (token == XContentParser.Token.START_OBJECT) { if (FILTER.match(currentFieldName, parser.getDeprecationHandler())) { Map filter = parser.mapOrdered(); @@ -250,6 +263,8 @@ public static Alias fromXContent(XContentParser parser) throws IOException { } else if (IS_HIDDEN.match(currentFieldName, parser.getDeprecationHandler())) { alias.isHidden(parser.booleanValue()); } + } else { + throw new IllegalArgumentException("Unknown token [" + token + "] in alias [" + alias.name + "]"); } } return alias; @@ -276,7 +291,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - builder.field(IS_WRITE_INDEX.getPreferredName(), writeIndex); + if (writeIndex != null) { + builder.field(IS_WRITE_INDEX.getPreferredName(), writeIndex); + } if (isHidden != null) { builder.field(IS_HIDDEN.getPreferredName(), isHidden); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 739dbf07f4e55..bc11b7e06ff57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -106,16 +106,12 @@ public byte value() { } public static Type fromValue(byte value) { - switch (value) { - case 0: - return ADD; - case 1: - return REMOVE; - case 2: - return REMOVE_INDEX; - default: - throw new IllegalArgumentException("No type for action [" + value + "]"); - } + return switch (value) { + case 0 -> ADD; + case 1 -> REMOVE; + case 2 -> REMOVE_INDEX; + default -> throw new IllegalArgumentException("No type for action [" + value + "]"); + }; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java index 53743786c5aed..ac4c0f6a75d9c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.index.query.QueryBuilder; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 616540e8a1bce..e212cd16f6075 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.Index; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -60,6 +61,7 @@ public class 
TransportIndicesAliasesAction extends AcknowledgedTransportMasterNo private final MetadataIndexAliasesService indexAliasesService; private final RequestValidators requestValidators; + private final SystemIndices systemIndices; @Inject public TransportIndicesAliasesAction( @@ -69,7 +71,8 @@ public TransportIndicesAliasesAction( final MetadataIndexAliasesService indexAliasesService, final ActionFilters actionFilters, final IndexNameExpressionResolver indexNameExpressionResolver, - final RequestValidators requestValidators + final RequestValidators requestValidators, + final SystemIndices systemIndices ) { super( IndicesAliasesAction.NAME, @@ -83,6 +86,7 @@ public TransportIndicesAliasesAction( ); this.indexAliasesService = indexAliasesService; this.requestValidators = Objects.requireNonNull(requestValidators); + this.systemIndices = systemIndices; } @Override @@ -127,7 +131,7 @@ protected void masterOperation( }).collect(Collectors.toList()); concreteIndices = nonBackingIndices.toArray(Index[]::new); switch (action.actionType()) { - case ADD: + case ADD -> { // Fail if parameters are used that data stream aliases don't support: if (action.routing() != null) { throw new IllegalArgumentException("aliases that point to data streams don't support routing"); @@ -155,7 +159,8 @@ protected void masterOperation( } } continue; - case REMOVE: + } + case REMOVE -> { for (String dataStreamName : concreteDataStreams) { for (String alias : concreteDataStreamAliases(action, state.metadata(), dataStreamName)) { finalActions.add(new AliasAction.RemoveDataStreamAlias(alias, dataStreamName, action.mustExist())); @@ -164,12 +169,11 @@ protected void masterOperation( if (nonBackingIndices.isEmpty() == false) { // Regular aliases/indices match as well with the provided expression. // (Only when adding new aliases, matching both data streams and indices is disallowed) - break; } else { continue; } - default: - throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]"); + } + default -> throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]"); } } else { concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), false, action.indices()); @@ -200,7 +204,7 @@ protected void masterOperation( switch (action.actionType()) { case ADD: for (String alias : concreteAliases(action, state.metadata(), index.getName())) { - String resolvedName = this.indexNameExpressionResolver.resolveDateMathExpression(alias, now); + String resolvedName = IndexNameExpressionResolver.resolveDateMathExpression(alias, now); finalActions.add( new AliasAction.Add( index.getName(), @@ -209,7 +213,7 @@ protected void masterOperation( action.indexRouting(), action.searchRouting(), action.writeIndex(), - action.isHidden() + systemIndices.isSystemName(resolvedName) ? 
Boolean.TRUE : action.isHidden() ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 2384f8443524b..4f80d933fa460 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; public abstract class BaseAliasesRequestBuilder< diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java index eff93a8b626f0..a7fd1f02f4650 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.alias.get; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index f314273ba09bf..ccaa573732808 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -195,49 +195,5 @@ private static void checkSystemIndexAccess( if (netNewSystemIndices.isEmpty() == false) { throw systemIndices.netNewSystemIndexAccessException(threadContext, netNewSystemIndices); } - checkSystemAliasAccess(request, systemIndices, systemIndexAccessLevel, threadContext); - } - - private static void checkSystemAliasAccess( - GetAliasesRequest request, - SystemIndices systemIndices, - SystemIndexAccessLevel systemIndexAccessLevel, - ThreadContext threadContext - ) { - final Predicate systemIndexAccessAllowPredicate; - if (systemIndexAccessLevel == SystemIndexAccessLevel.NONE) { - systemIndexAccessAllowPredicate = name -> true; - } else if (systemIndexAccessLevel == SystemIndexAccessLevel.RESTRICTED) { - systemIndexAccessAllowPredicate = systemIndices.getProductSystemIndexNamePredicate(threadContext).negate(); - } else { - throw new IllegalArgumentException("Unexpected system index access level: " + systemIndexAccessLevel); - } - - final List systemAliases = new ArrayList<>(); - final List netNewSystemAliases = new ArrayList<>(); - for (String alias : request.aliases()) { - if (systemIndices.isSystemName(alias)) { - if (systemIndexAccessAllowPredicate.test(alias)) { - if (systemIndices.isNetNewSystemIndex(alias)) { - netNewSystemAliases.add(alias); - } else { - systemAliases.add(alias); - } - } - } - } - - if (systemAliases.isEmpty() == false) { - deprecationLogger.warn( - 
DeprecationCategory.API, - "open_system_alias_access", - "this request accesses aliases with names reserved for system indices: {}, but in a future major version, direct " - + "access to system indices and their aliases will not be allowed", - systemAliases - ); - } - if (netNewSystemAliases.isEmpty() == false) { - throw systemIndices.netNewSystemIndexAccessException(threadContext, netNewSystemAliases); - } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java index 4f1d9a47340d7..fe07ce83da578 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index d811ac993a5d3..364c32237bf81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -300,8 +300,7 @@ private static AnalyzeAction.DetailAnalyzeResponse detailAnalyze(AnalyzeAction.R potentialCustomAnalyzer = ((NamedAnalyzer) analyzer).analyzer(); } - if (potentialCustomAnalyzer instanceof AnalyzerComponentsProvider) { - AnalyzerComponentsProvider customAnalyzer = (AnalyzerComponentsProvider) potentialCustomAnalyzer; + if (potentialCustomAnalyzer instanceof AnalyzerComponentsProvider customAnalyzer) { // note: this is not field-name dependent in our cases so we can leave out the argument int positionIncrementGap = potentialCustomAnalyzer.getPositionIncrementGap(""); int offsetGap = potentialCustomAnalyzer.getOffsetGap(""); @@ -547,8 +546,7 @@ private static Map extractExtendedAttributes(TokenStream stream, return; } if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) { - if (value instanceof BytesRef) { - final BytesRef p = (BytesRef) value; + if (value instanceof final BytesRef p) { value = p.toString(); } extendedAttributes.put(key, value); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index b5bf127f17975..8610b4ef8ce7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.cache.clear; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder< 
ClearIndicesCacheRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java index 43d56f000235f..7e6b31271ae90 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for close index request diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 6e9c77e99b8c7..adabbd19f6fc1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -29,6 +31,7 @@ import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; @@ -41,9 +44,14 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_HIDDEN; + /** * API that auto-creates an index or data stream that originates from requests that write into an index that doesn't yet exist. 
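// The AutoCreateAction diff that follows replaces per-request cluster-state updates with a
// batched ClusterStateTaskExecutor that deduplicates concurrent auto-create tasks for the same
// request and reroutes once per batch. A minimal sketch of that dedup-then-reroute shape is
// below; the Task record and string-typed state are illustrative stand-ins, not Elasticsearch APIs.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class BatchedAutoCreateSketch {

    record Task(String index) {}

    // Apply each distinct creation once; duplicates reuse the first task's result, and the
    // simulated reroute runs only if the batch actually changed the state.
    static String executeBatch(String currentState, List<Task> tasks) {
        String state = currentState;
        Map<String, Task> firstTaskPerIndex = new HashMap<>();
        for (Task task : tasks) {
            if (firstTaskPerIndex.putIfAbsent(task.index(), task) == null) {
                state = state + " create[" + task.index() + "]";
            }
        }
        return state.equals(currentState) ? state : state + " reroute";
    }
}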
*/ @@ -66,6 +74,8 @@ public static final class TransportAction extends TransportMasterNodeAction executor; + @Inject public TransportAction( TransportService transportService, @@ -76,7 +86,8 @@ public TransportAction( MetadataCreateIndexService createIndexService, MetadataCreateDataStreamService metadataCreateDataStreamService, AutoCreateIndex autoCreateIndex, - SystemIndices systemIndices + SystemIndices systemIndices, + AllocationService allocationService ) { super( NAME, @@ -94,6 +105,31 @@ public TransportAction( this.createIndexService = createIndexService; this.metadataCreateDataStreamService = metadataCreateDataStreamService; this.autoCreateIndex = autoCreateIndex; + executor = (currentState, tasks) -> { + ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); + ClusterState state = currentState; + final Map successfulRequests = new HashMap<>(tasks.size()); + for (CreateIndexTask task : tasks) { + try { + final CreateIndexTask successfulBefore = successfulRequests.putIfAbsent(task.request, task); + if (successfulBefore == null) { + state = task.execute(state); + } else { + // TODO: clean this up to just deduplicate the task listener instead of setting the generated name from + // duplicate tasks here and then waiting for shards to become available multiple times in parallel for + // each duplicate task + task.indexNameRef.set(successfulBefore.indexNameRef.get()); + } + builder.success(task); + } catch (Exception e) { + builder.failure(task, e); + } + } + if (state != currentState) { + state = allocationService.reroute(state, "auto-create"); + } + return builder.build(state); + }; } @Override @@ -112,145 +148,168 @@ protected void masterOperation( new String[] { indexName }, ActiveShardCount.DEFAULT, request.timeout(), - shardsAcked -> { finalListener.onResponse(new CreateIndexResponse(true, shardsAcked, indexName)); }, + shardsAcked -> finalListener.onResponse(new CreateIndexResponse(true, shardsAcked, indexName)), finalListener::onFailure ); } else { finalListener.onResponse(new CreateIndexResponse(false, false, indexName)); } }, finalListener::onFailure); - clusterService.submitStateUpdateTask( - "auto create [" + request.index() + "]", - new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - final SystemDataStreamDescriptor dataStreamDescriptor = systemIndices.validateDataStreamAccess( - request.index(), - threadPool.getThreadContext() - ); - final boolean isSystemDataStream = dataStreamDescriptor != null; - final boolean isSystemIndex = isSystemDataStream == false && systemIndices.isSystemIndex(request.index()); - final ComposableIndexTemplate template = resolveTemplate(request, currentState.metadata()); - final boolean isDataStream = isSystemIndex == false - && (isSystemDataStream || (template != null && template.getDataStreamTemplate() != null)); - - if (isDataStream) { - // This expression only evaluates to true when the argument is non-null and false - if (isSystemDataStream == false && Boolean.FALSE.equals(template.getAllowAutoCreate())) { - throw new IndexNotFoundException( - "composable template " + template.indexPatterns() + " forbids index auto creation" - ); - } - - CreateDataStreamClusterStateUpdateRequest createRequest = new CreateDataStreamClusterStateUpdateRequest( - request.index(), - dataStreamDescriptor, - request.masterNodeTimeout(), - request.timeout() - ); - ClusterState clusterState = 
metadataCreateDataStreamService.createDataStream(createRequest, currentState); - indexNameRef.set(clusterState.metadata().dataStreams().get(request.index()).getIndices().get(0).getName()); - return clusterState; - } else { - String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); - indexNameRef.set(indexName); - if (isSystemIndex) { - if (indexName.equals(request.index()) == false) { - throw new IllegalStateException("system indices do not support date math expressions"); - } - } else { - // This will throw an exception if the index does not exist and creating it is prohibited - final boolean shouldAutoCreate = autoCreateIndex.shouldAutoCreate(indexName, currentState); - - if (shouldAutoCreate == false) { - // The index already exists. - return currentState; - } - } - - final SystemIndexDescriptor mainDescriptor = isSystemIndex - ? systemIndices.findMatchingDescriptor(indexName) - : null; - final boolean isManagedSystemIndex = mainDescriptor != null && mainDescriptor.isAutomaticallyManaged(); - - final CreateIndexClusterStateUpdateRequest updateRequest; - - if (isManagedSystemIndex) { - final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( - state.nodes().getSmallestNonClientNodeVersion() - ); - if (descriptor == null) { - final String message = mainDescriptor.getMinimumNodeVersionMessage("auto-create index"); - logger.warn(message); - throw new IllegalStateException(message); - } - - updateRequest = buildSystemIndexUpdateRequest(indexName, descriptor); - } else { - updateRequest = buildUpdateRequest(indexName); - } - - return createIndexService.applyCreateIndexRequest(currentState, updateRequest, false); - } - } + CreateIndexTask clusterTask = new CreateIndexTask(request, listener, indexNameRef); + clusterService.submitStateUpdateTask("auto create [" + request.index() + "]", clusterTask, clusterTask, executor); + } - private CreateIndexClusterStateUpdateRequest buildUpdateRequest(String indexName) { - CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( - request.cause(), - indexName, - request.index() - ).ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()); - logger.debug("Auto-creating index {}", indexName); - return updateRequest; - } + @Override + protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.index()); + } + + // TODO: split the listener out of this task and use AckedClusterStateTaskListener directly to avoid the complicated listener + // construction upstream when instantiating these + private final class CreateIndexTask extends AckedClusterStateUpdateTask { - private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest( - String indexName, - SystemIndexDescriptor descriptor - ) { - String mappings = descriptor.getMappings(); - Settings settings = descriptor.getSettings(); - String aliasName = descriptor.getAliasName(); + final CreateIndexRequest request; + final AtomicReference indexNameRef; - // if we are writing to the alias name, we should create the primary index here - String concreteIndexName = indexName.equals(aliasName) ? 
descriptor.getPrimaryIndex() : indexName; + CreateIndexTask( + CreateIndexRequest request, + ActionListener listener, + AtomicReference indexNameRef + ) { + super(Priority.URGENT, request, listener); + this.request = request; + this.indexNameRef = indexNameRef; + } - CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( - request.cause(), - concreteIndexName, - request.index() - ).ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()); + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + final SystemDataStreamDescriptor dataStreamDescriptor = systemIndices.validateDataStreamAccess( + request.index(), + threadPool.getThreadContext() + ); + final boolean isSystemDataStream = dataStreamDescriptor != null; + final boolean isSystemIndex = isSystemDataStream == false && systemIndices.isSystemIndex(request.index()); + final ComposableIndexTemplate template = resolveTemplate(request, currentState.metadata()); + final boolean isDataStream = isSystemIndex == false + && (isSystemDataStream || (template != null && template.getDataStreamTemplate() != null)); - updateRequest.waitForActiveShards(ActiveShardCount.ALL); + if (isDataStream) { + // This expression only evaluates to true when the argument is non-null and false + if (isSystemDataStream == false && Boolean.FALSE.equals(template.getAllowAutoCreate())) { + throw new IndexNotFoundException( + "composable template " + template.indexPatterns() + " forbids index auto creation" + ); + } - if (mappings != null) { - updateRequest.mappings(mappings); + CreateDataStreamClusterStateUpdateRequest createRequest = new CreateDataStreamClusterStateUpdateRequest( + request.index(), + dataStreamDescriptor, + request.masterNodeTimeout(), + request.timeout(), + false + ); + ClusterState clusterState = metadataCreateDataStreamService.createDataStream(createRequest, currentState); + indexNameRef.set(clusterState.metadata().dataStreams().get(request.index()).getIndices().get(0).getName()); + return clusterState; + } else { + String indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); + indexNameRef.set(indexName); + if (isSystemIndex) { + if (indexName.equals(request.index()) == false) { + throw new IllegalStateException("system indices do not support date math expressions"); } - if (settings != null) { - updateRequest.settings(settings); + } else { + // This will throw an exception if the index does not exist and creating it is prohibited + final boolean shouldAutoCreate = autoCreateIndex.shouldAutoCreate(indexName, currentState); + + if (shouldAutoCreate == false) { + // The index already exists. + return currentState; } - if (aliasName != null) { - updateRequest.aliases(Set.of(new Alias(aliasName))); + } + + final SystemIndexDescriptor mainDescriptor = isSystemIndex ? 
systemIndices.findMatchingDescriptor(indexName) : null; + final boolean isManagedSystemIndex = mainDescriptor != null && mainDescriptor.isAutomaticallyManaged(); + + final CreateIndexClusterStateUpdateRequest updateRequest; + + if (isManagedSystemIndex) { + final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( + currentState.nodes().getSmallestNonClientNodeVersion() + ); + if (descriptor == null) { + final String message = mainDescriptor.getMinimumNodeVersionMessage("auto-create index"); + logger.warn(message); + throw new IllegalStateException(message); } - if (logger.isDebugEnabled()) { - if (concreteIndexName.equals(indexName) == false) { - logger.debug("Auto-creating backing system index {} for alias {}", concreteIndexName, indexName); - } else { - logger.debug("Auto-creating system index {}", concreteIndexName); - } + updateRequest = buildSystemIndexUpdateRequest(indexName, descriptor); + } else if (isSystemIndex) { + updateRequest = buildUpdateRequest(indexName); + + if (Objects.isNull(request.settings())) { + updateRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); + } else if (false == request.settings().hasValue(SETTING_INDEX_HIDDEN)) { + updateRequest.settings(Settings.builder().put(request.settings()).put(SETTING_INDEX_HIDDEN, true).build()); + } else if ("false".equals(request.settings().get(SETTING_INDEX_HIDDEN))) { + final String message = "Cannot auto-create system index [" + indexName + "] with [index.hidden] set to 'false'"; + logger.warn(message); + throw new IllegalStateException(message); } + } else { + updateRequest = buildUpdateRequest(indexName); + } + + return createIndexService.applyCreateIndexRequest(currentState, updateRequest, false); + } + } + + private CreateIndexClusterStateUpdateRequest buildUpdateRequest(String indexName) { + CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( + request.cause(), + indexName, + request.index() + ).ackTimeout(request.timeout()).performReroute(false).masterNodeTimeout(request.masterNodeTimeout()); + logger.debug("Auto-creating index {}", indexName); + return updateRequest; + } + + private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest(String indexName, SystemIndexDescriptor descriptor) { + String mappings = descriptor.getMappings(); + Settings settings = descriptor.getSettings(); + String aliasName = descriptor.getAliasName(); + + // if we are writing to the alias name, we should create the primary index here + String concreteIndexName = indexName.equals(aliasName) ? 
descriptor.getPrimaryIndex() : indexName; - return updateRequest; + CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( + request.cause(), + concreteIndexName, + request.index() + ).ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()).performReroute(false); + + updateRequest.waitForActiveShards(ActiveShardCount.ALL); + + if (mappings != null) { + updateRequest.mappings(mappings); + } + if (settings != null) { + updateRequest.settings(settings); + } + if (aliasName != null) { + updateRequest.aliases(Set.of(new Alias(aliasName).isHidden(true))); + } + + if (logger.isDebugEnabled()) { + if (concreteIndexName.equals(indexName) == false) { + logger.debug("Auto-creating backing system index {} for alias {}", concreteIndexName, indexName); + } else { + logger.debug("Auto-creating system index {}", concreteIndexName); } } - ); - } - @Override - protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.index()); + return updateRequest; + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 0603f96c53ab6..8b0eecf401ea2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -46,12 +46,12 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** - * A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}. + * A request to create an index. Best created with {@link org.elasticsearch.client.internal.Requests#createIndexRequest(String)}. *

    * The index created can optionally be created with {@link #settings(org.elasticsearch.common.settings.Settings)}. * - * @see org.elasticsearch.client.IndicesAdminClient#create(CreateIndexRequest) - * @see org.elasticsearch.client.Requests#createIndexRequest(String) + * @see org.elasticsearch.client.internal.IndicesAdminClient#create(CreateIndexRequest) + * @see org.elasticsearch.client.internal.Requests#createIndexRequest(String) * @see CreateIndexResponse */ public class CreateIndexRequest extends AcknowledgedRequest implements IndicesRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index afe31552d0bd1..c07734aee557c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 627c4943bafa0..a7767a96a2cdb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -34,6 +34,9 @@ import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_HIDDEN; /** * Create index action. 
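// Hedged usage sketch for the relocated org.elasticsearch.client.internal entry points that the
// CreateIndexRequest javadoc above refers to; the index name and setting values are illustrative
// only and not part of this change.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.client.internal.Requests;
import org.elasticsearch.common.settings.Settings;

final class CreateIndexUsageSketch {
    static void createExample(Client client) {
        CreateIndexRequest request = Requests.createIndexRequest("logs-000001")
            .settings(Settings.builder().put("index.number_of_shards", 1));
        // Responds once the index creation is acknowledged (handlers elided for brevity)
        client.admin().indices().create(request, ActionListener.wrap(r -> {}, e -> {}));
    }
}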
@@ -87,10 +90,11 @@ protected void masterOperation( } final long resolvedAt = System.currentTimeMillis(); - final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index(), resolvedAt); + final String indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index(), resolvedAt); final SystemIndexDescriptor mainDescriptor = systemIndices.findMatchingDescriptor(indexName); - final boolean isSystemIndex = mainDescriptor != null && mainDescriptor.isAutomaticallyManaged(); + final boolean isSystemIndex = mainDescriptor != null; + final boolean isManagedSystemIndex = isSystemIndex && mainDescriptor.isAutomaticallyManaged(); if (mainDescriptor != null && mainDescriptor.isNetNew()) { final SystemIndexAccessLevel systemIndexAccessLevel = systemIndices.getSystemIndexAccessLevel(threadPool.getThreadContext()); if (systemIndexAccessLevel != SystemIndexAccessLevel.ALL) { @@ -107,13 +111,26 @@ } } + if (isSystemIndex) { + if (Objects.isNull(request.settings())) { + request.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); + } else if (false == request.settings().hasValue(SETTING_INDEX_HIDDEN)) { + request.settings(Settings.builder().put(request.settings()).put(SETTING_INDEX_HIDDEN, true).build()); + } else if (Boolean.FALSE.toString().equalsIgnoreCase(request.settings().get(SETTING_INDEX_HIDDEN))) { + final String message = "Cannot create system index [" + indexName + "] with [index.hidden] set to 'false'"; + logger.warn(message); + listener.onFailure(new IllegalStateException(message)); + return; + } + } + final CreateIndexClusterStateUpdateRequest updateRequest; // Requests that the cluster itself generates are permitted to create a system index with // different mappings, settings, etc. This is so that rolling upgrade scenarios still work. // We check this via the request's origin. Eventually, `SystemIndexManager` will reconfigure // the index to the latest settings. 
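// The hunk above always forces system indices to be hidden: absent settings fall back to the
// descriptor defaults, a missing index.hidden is set to true, and an explicit
// index.hidden=false is rejected. A standalone sketch of that decision ladder, with a plain
// Map standing in for Settings:

import java.util.HashMap;
import java.util.Map;

final class HiddenSettingSketch {
    static Map<String, String> normalize(String indexName, Map<String, String> requested) {
        Map<String, String> settings = requested == null ? new HashMap<>() : new HashMap<>(requested);
        String hidden = settings.get("index.hidden");
        if (hidden == null) {
            settings.put("index.hidden", "true"); // default system indices to hidden
        } else if ("false".equalsIgnoreCase(hidden)) {
            throw new IllegalStateException(
                "Cannot create system index [" + indexName + "] with [index.hidden] set to 'false'"
            );
        }
        return settings;
    }
}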
- if (isSystemIndex && Strings.isNullOrEmpty(request.origin())) { + if (isManagedSystemIndex && Strings.isNullOrEmpty(request.origin())) { final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( state.nodes().getSmallestNonClientNodeVersion() ); @@ -140,11 +157,16 @@ private CreateIndexClusterStateUpdateRequest buildUpdateRequest( String indexName, long nameResolvedAt ) { + Set aliases = request.aliases().stream().peek(alias -> { + if (systemIndices.isSystemName(alias.name())) { + alias.isHidden(true); + } + }).collect(Collectors.toSet()); return new CreateIndexClusterStateUpdateRequest(cause, indexName, request.index()).ackTimeout(request.timeout()) .masterNodeTimeout(request.masterNodeTimeout()) .settings(request.settings()) .mappings(request.mappings()) - .aliases(request.aliases()) + .aliases(aliases) .nameResolvedInstant(nameResolvedAt) .waitForActiveShards(request.waitForActiveShards()); } @@ -160,9 +182,10 @@ private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest( if (descriptor.getAliasName() == null) { aliases = Set.of(); } else { - aliases = Set.of(new Alias(descriptor.getAliasName())); + aliases = Set.of(new Alias(descriptor.getAliasName()).isHidden(true)); } + // Here, we override the user's requested index with the descriptor's primary index final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( cause, descriptor.getPrimaryIndex(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index e6a9230a5e515..12cf108ca9dc0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.admin.indices.dangling.delete; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -22,9 +20,10 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -39,6 +38,7 @@ import org.elasticsearch.transport.TransportService; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; /** @@ -110,7 +110,8 @@ public void onResponse(Index indexToDelete) { public ClusterState execute(final ClusterState currentState) { return deleteDanglingIndex(currentState, indexToDelete); } - } + }, + ClusterStateTaskExecutor.unbatched() ); } @@ -125,8 +126,8 @@ public void onFailure(Exception e) { private ClusterState deleteDanglingIndex(ClusterState currentState, Index indexToDelete) { final Metadata metaData = currentState.getMetadata(); - 
for (ObjectObjectCursor each : metaData.indices()) { - if (indexToDelete.getUUID().equals(each.value.getIndexUUID())) { + for (Map.Entry each : metaData.indices().entrySet()) { + if (indexToDelete.getUUID().equals(each.getValue().getIndexUUID())) { throw new IllegalArgumentException( "Refusing to delete dangling index " + indexToDelete diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 5e4787f287f4c..6e630a403384c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.gateway.LocalAllocateDangledIndices; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index d6a6a03f39d99..011ada32b6a79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -21,7 +21,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}. + * A request to delete an index. Best created with {@link org.elasticsearch.client.internal.Requests#deleteIndexRequest(String)}. 
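// Several hunks in this change (VersionStats, TransportDeleteDanglingIndexAction,
// GetMappingsResponse) drop the hppc ObjectObjectCursor iteration in favour of plain
// java.util.Map entry sets. A minimal sketch of the new style with simplified types:

import java.util.Map;

final class CursorMigrationSketch {
    // Iterate entrySet() directly instead of cursor.key / cursor.value
    static String findIndexByUuid(Map<String, String> indexUuidsByName, String uuid) {
        for (Map.Entry<String, String> each : indexUuidsByName.entrySet()) {
            if (uuid.equals(each.getValue())) {
                return each.getKey();
            }
        }
        return null;
    }
}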
*/ public class DeleteIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 80cbce2d12898..62be381a40540 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder< DeleteIndexRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java index 8a213656a6351..572eb9d54f3b6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java @@ -244,26 +244,20 @@ void analyzeDocValues(SegmentReader reader, IndexDiskUsageStats stats) throws IO cancellationChecker.checkForCancellation(); directory.resetBytesRead(); switch (dvType) { - case NUMERIC: - iterateDocValues(maxDocs, () -> docValuesReader.getNumeric(field), NumericDocValues::longValue); - break; - case SORTED_NUMERIC: - iterateDocValues(maxDocs, () -> docValuesReader.getSortedNumeric(field), dv -> { - for (int i = 0; i < dv.docValueCount(); i++) { - cancellationChecker.logEvent(); - dv.nextValue(); - } - }); - break; - case BINARY: - iterateDocValues(maxDocs, () -> docValuesReader.getBinary(field), BinaryDocValues::binaryValue); - break; - case SORTED: + case NUMERIC -> iterateDocValues(maxDocs, () -> docValuesReader.getNumeric(field), NumericDocValues::longValue); + case SORTED_NUMERIC -> iterateDocValues(maxDocs, () -> docValuesReader.getSortedNumeric(field), dv -> { + for (int i = 0; i < dv.docValueCount(); i++) { + cancellationChecker.logEvent(); + dv.nextValue(); + } + }); + case BINARY -> iterateDocValues(maxDocs, () -> docValuesReader.getBinary(field), BinaryDocValues::binaryValue); + case SORTED -> { SortedDocValues sorted = iterateDocValues(maxDocs, () -> docValuesReader.getSorted(field), SortedDocValues::ordValue); sorted.lookupOrd(0); sorted.lookupOrd(sorted.getValueCount() - 1); - break; - case SORTED_SET: + } + case SORTED_SET -> { SortedSetDocValues sortedSet = iterateDocValues(maxDocs, () -> docValuesReader.getSortedSet(field), dv -> { while (dv.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) { cancellationChecker.logEvent(); @@ -271,10 +265,11 @@ void analyzeDocValues(SegmentReader reader, IndexDiskUsageStats stats) throws IO }); sortedSet.lookupOrd(0); sortedSet.lookupOrd(sortedSet.getValueCount() - 1); - break; - default: + } + default -> { assert false : "Unknown docValues type [" + dvType + "]"; throw new IllegalStateException("Unknown docValues type [" + dvType + "]"); + } } stats.addDocValues(field.name, directory.getBytesRead()); } @@ -294,32 +289,20 @@ private void readProximity(Terms terms, PostingsEnum postings) throws 
IOExceptio private BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException { if (term != null && termsEnum.seekExact(term)) { final TermState termState = termsEnum.termState(); - if (termState instanceof Lucene90PostingsFormat.IntBlockTermState) { - final Lucene90PostingsFormat.IntBlockTermState blockTermState = (Lucene90PostingsFormat.IntBlockTermState) termState; + if (termState instanceof final Lucene90PostingsFormat.IntBlockTermState blockTermState) { return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); } - if (termState instanceof Lucene84PostingsFormat.IntBlockTermState) { - final Lucene84PostingsFormat.IntBlockTermState blockTermState = (Lucene84PostingsFormat.IntBlockTermState) termState; + if (termState instanceof final Lucene84PostingsFormat.IntBlockTermState blockTermState) { return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); } - if (termState instanceof Lucene50PostingsFormat.IntBlockTermState) { - final Lucene50PostingsFormat.IntBlockTermState blockTermState = (Lucene50PostingsFormat.IntBlockTermState) termState; + if (termState instanceof final Lucene50PostingsFormat.IntBlockTermState blockTermState) { return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); } } return null; } - private static class BlockTermState { - final long docStartFP; - final long posStartFP; - final long payloadFP; - - BlockTermState(long docStartFP, long posStartFP, long payloadFP) { - this.docStartFP = docStartFP; - this.posStartFP = posStartFP; - this.payloadFP = payloadFP; - } + private record BlockTermState(long docStartFP, long posStartFP, long payloadFP) { long distance(BlockTermState other) { return this.docStartFP - other.docStartFP + this.posStartFP - other.posStartFP + this.payloadFP - other.payloadFP; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java index 97b37105d5e8d..b083f1b221a1e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java @@ -106,9 +106,8 @@ protected AnalyzeIndexDiskUsageResponse newResponse( final Map combined = new HashMap<>(); for (int i = 0; i < shardsResponses.length(); i++) { final Object r = shardsResponses.get(i); - if (r instanceof AnalyzeDiskUsageShardResponse) { + if (r instanceof AnalyzeDiskUsageShardResponse resp) { ++successfulShards; - AnalyzeDiskUsageShardResponse resp = (AnalyzeDiskUsageShardResponse) r; combined.compute(resp.getIndex(), (k, v) -> v == null ? 
resp.stats : v.add(resp.stats)); } else if (r instanceof DefaultShardOperationFailedException) { shardFailures.add((DefaultShardOperationFailedException) r); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java index b755119450fea..001ff67004c4c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -22,10 +22,10 @@ * by flushing data to the index storage and clearing the internal transaction log. By default, Elasticsearch uses * memory heuristics in order to automatically trigger flush operations as required in order to clear memory. *
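// The TransportAnalyzeIndexDiskUsageAction hunk above folds per-shard stats into a single map
// entry per index via Map.compute. A tiny self-contained sketch of that accumulate-or-insert
// pattern, with long counters standing in for IndexDiskUsageStats:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class ComputeMergeSketch {
    static Map<String, Long> combine(List<Map.Entry<String, Long>> shardStats) {
        Map<String, Long> combined = new HashMap<>();
        for (Map.Entry<String, Long> shard : shardStats) {
            // insert on first sight, otherwise add to the running total (stands in for v.add(stats))
            combined.compute(shard.getKey(), (k, v) -> v == null ? shard.getValue() : v + shard.getValue());
        }
        return combined;
    }
}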

    - * Best created with {@link org.elasticsearch.client.Requests#flushRequest(String...)}. + * Best created with {@link org.elasticsearch.client.internal.Requests#flushRequest(String...)}. * - * @see org.elasticsearch.client.Requests#flushRequest(String...) - * @see org.elasticsearch.client.IndicesAdminClient#flush(FlushRequest) + * @see org.elasticsearch.client.internal.Requests#flushRequest(String...) + * @see org.elasticsearch.client.internal.IndicesAdminClient#flush(FlushRequest) * @see FlushResponse */ public class FlushRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java index 90c8be5b74aa5..75dcced1f35a6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 951e4e461aa6e..a828f6e413d77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 082645f574770..af416fa95a862 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -28,8 +28,8 @@ * to force merge down to. Defaults to simply checking if a merge needs * to execute, and if so, executes it * - * @see org.elasticsearch.client.Requests#forceMergeRequest(String...) - * @see org.elasticsearch.client.IndicesAdminClient#forceMerge(ForceMergeRequest) + * @see org.elasticsearch.client.internal.Requests#forceMergeRequest(String...) 
+ * @see org.elasticsearch.client.internal.IndicesAdminClient#forceMerge(ForceMergeRequest) * @see ForceMergeResponse */ public class ForceMergeRequest extends BroadcastRequest { @@ -43,6 +43,10 @@ public static final class Defaults { private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS; private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; + /** + * Should this task store its result? + */ + private boolean shouldStoreResult; private static final Version FORCE_MERGE_UUID_SIMPLE_VERSION = Version.V_8_0_0; @@ -131,6 +135,19 @@ public ForceMergeRequest flush(boolean flush) { return this; } + /** + * Should this task store its result after it has finished? + */ + public ForceMergeRequest setShouldStoreResult(boolean shouldStoreResult) { + this.shouldStoreResult = shouldStoreResult; + return this; + } + + @Override + public boolean getShouldStoreResult() { + return shouldStoreResult; + } + @Override public String getDescription() { return "Force-merge indices " diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index acacb97572a8f..4e24eca579be1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * A request to force merge one or more indices. 
In order to force merge all diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java index fb47b7435e018..53e0d8f69fd26 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java index 0be7828ba33a8..6492c23e4924d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; /** A helper class to build {@link GetFieldMappingsRequest} objects */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index 7787dd0b95122..f919be22143f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -122,7 +122,7 @@ private void addFieldMappingsToBuilder(XContentBuilder builder, Params params, M } } - public static class FieldMappingMetadata implements ToXContentFragment { + public record FieldMappingMetadata(String fullName, BytesReference source) implements ToXContentFragment { private static final ParseField FULL_NAME = new ParseField("full_name"); private static final ParseField MAPPING = new ParseField("mapping"); @@ -133,28 +133,13 @@ public static class FieldMappingMetadata implements ToXContentFragment { a -> new FieldMappingMetadata((String) a[0], (BytesReference) a[1]) ); - private final String fullName; - private final BytesReference source; - - public FieldMappingMetadata(String fullName, BytesReference source) { - this.fullName = fullName; - this.source = source; - } - - public String fullName() { - return fullName; - } - - /** Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. */ + /** + * Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. 
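// FieldMappingMetadata becomes a record here, which is why the hand-written accessors, equals,
// hashCode and toString are deleted in the lines that follow: the compiler derives all of them
// from the record header. An equivalent stand-in sketch (String replaces BytesReference):

record FieldMappingSketch(String fullName, String source) {
    // records may still declare extra derived methods alongside the generated members
    boolean hasSource() {
        return source != null && source.isEmpty() == false;
    }
}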
+ */ public Map sourceAsMap() { return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); } - // pkg-private for testing - BytesReference getSource() { - return source; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(FULL_NAME.getPreferredName(), fullName); @@ -167,24 +152,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } return builder; } - - @Override - public String toString() { - return "FieldMappingMetadata{fullName='" + fullName + '\'' + ", source=" + source + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if ((o instanceof FieldMappingMetadata) == false) return false; - FieldMappingMetadata that = (FieldMappingMetadata) o; - return Objects.equals(fullName, that.fullName) && Objects.equals(source, that.source); - } - - @Override - public int hashCode() { - return Objects.hash(fullName, source); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 0a3720bd5b8a7..0bf65b27328d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder< GetMappingsRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index 3e9b3870d9522..dcbd309f54d05 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -24,6 +22,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.rest.BaseRestHandler.DEFAULT_INCLUDE_TYPE_NAME_POLICY; import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; @@ -68,19 +67,19 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - for (final ObjectObjectCursor indexEntry : getMappings()) { - builder.startObject(indexEntry.key); + for (final Map.Entry indexEntry : getMappings().entrySet()) { + builder.startObject(indexEntry.getKey()); boolean includeTypeName = params.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); - if (builder.getRestApiVersion() == RestApiVersion.V_7 && includeTypeName && indexEntry.value != null) { + if (builder.getRestApiVersion() == RestApiVersion.V_7 && 
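Converting FieldMappingMetadata to a record deletes the hand-written constructor, accessors, toString, equals, and hashCode above because the compiler generates all of them with the same value semantics: component-wise equality and an accessor named after each component. A small sketch of the equivalence, with illustrative names:

    // Sketch: what a record generates for free.
    record Pair(String name, int value) {}

    Pair a = new Pair("x", 1);
    Pair b = new Pair("x", 1);
    assert a.equals(b);                  // component-wise equals, generated
    assert a.hashCode() == b.hashCode(); // consistent with equals, generated
    assert a.name().equals("x");         // accessor named after the component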
includeTypeName && indexEntry.getValue() != null) { builder.startObject(MAPPINGS.getPreferredName()); - if (indexEntry.value != MappingMetadata.EMPTY_MAPPINGS) { - builder.field(MapperService.SINGLE_MAPPING_NAME, indexEntry.value.sourceAsMap()); + if (indexEntry.getValue() != MappingMetadata.EMPTY_MAPPINGS) { + builder.field(MapperService.SINGLE_MAPPING_NAME, indexEntry.getValue().sourceAsMap()); } builder.endObject(); - } else if (indexEntry.value != null) { - builder.field(MAPPINGS.getPreferredName(), indexEntry.value.sourceAsMap()); + } else if (indexEntry.getValue() != null) { + builder.field(MAPPINGS.getPreferredName(), indexEntry.getValue().sourceAsMap()); } else { builder.startObject(MAPPINGS.getPreferredName()).endObject(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index 39f2440b5b963..95c1136fd265f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -87,8 +87,7 @@ private GetFieldMappingsResponse merge(AtomicReferenceArray indexRespons Map> mergedResponses = new HashMap<>(); for (int i = 0; i < indexResponses.length(); i++) { Object element = indexResponses.get(i); - if (element instanceof GetFieldMappingsResponse) { - GetFieldMappingsResponse response = (GetFieldMappingsResponse) element; + if (element instanceof GetFieldMappingsResponse response) { mergedResponses.putAll(response.mappings()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java index e59653f4d8d29..635412c8be2e8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java @@ -21,7 +21,7 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda private final CompressedXContent source; public PutMappingClusterStateUpdateRequest(String source) throws IOException { - this.source = new CompressedXContent(source); + this.source = CompressedXContent.fromJSON(source); } public CompressedXContent source() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 47af2bfe79692..bddbe8332af65 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -40,13 +40,13 @@ /** * Puts a mapping
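The merge loop in TransportGetFieldMappingsAction now uses pattern matching for instanceof (Java 16): the type test, the cast, and the variable binding collapse into one step, and the binding is only in scope where the test succeeded. A minimal self-contained sketch of the idiom:

    // Sketch: the instanceof pattern replaces a separate cast line.
    static int lengthIfString(Object element) {
        if (element instanceof String s) { // test, cast, and bind at once
            return s.length();             // 's' is already typed as String
        }
        return -1;
    }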
definition into one or more indices. Best created with - * {@link org.elasticsearch.client.Requests#putMappingRequest(String...)}. + * {@link org.elasticsearch.client.internal.Requests#putMappingRequest(String...)}. *

* If the mappings already exist, the new mappings will be merged with the existing ones. If elements * that can't be merged are detected, the request will be rejected. * - * @see org.elasticsearch.client.Requests#putMappingRequest(String...) - * @see org.elasticsearch.client.IndicesAdminClient#putMapping(PutMappingRequest) + * @see org.elasticsearch.client.internal.Requests#putMappingRequest(String...) + * @see org.elasticsearch.client.internal.IndicesAdminClient#putMapping(PutMappingRequest) * @see AcknowledgedResponse */ public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index b9dd262e49f08..34b46bb8e090f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java index 5e0b04d17c57b..060ead9deb246 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for an open index request diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java index d9085aeaafbc4..9db890428e456 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock; /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequestBuilder.java index 79ecfb795b520..1a38aff03a824 100644 ---
a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.recovery; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * Recovery information request builder. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java index f09da34705914..d332ffe7c9957 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -18,8 +18,8 @@ * capabilities depend on the index engine used. For example, the internal one requires refresh to be called, but by * default a refresh is scheduled periodically. * - * @see org.elasticsearch.client.Requests#refreshRequest(String...) - * @see org.elasticsearch.client.IndicesAdminClient#refresh(RefreshRequest) + * @see org.elasticsearch.client.internal.Requests#refreshRequest(String...) + * @see org.elasticsearch.client.internal.IndicesAdminClient#refresh(RefreshRequest) * @see RefreshResponse */ public class RefreshRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java index 8dd3ae9756473..98537e2c25b57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * A refresh request making all operations performed since the last refresh available for search.
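RefreshRequest and RefreshRequestBuilder only have their client imports repointed here, but the operation they describe is the visibility barrier named in the javadoc above: everything indexed since the previous refresh becomes searchable. A hedged sketch of issuing one through the admin client, assuming a Client instance named client and a placeholder index name:

    // Sketch: a blocking refresh through the (now internal) client API.
    client.admin().indices().prepareRefresh("my-index").get();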
The (near) real-time diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 440003d16fad8..ff9f6640b4120 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index 7ca7001bd547b..1f4ef2b7a7c87 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexAbstractionResolver; @@ -569,10 +569,9 @@ private static void enrichIndexAbstraction( IndexAbstraction ia = lookup.get(indexAbstraction); if (ia != null) { switch (ia.getType()) { - case CONCRETE_INDEX: + case CONCRETE_INDEX -> { IndexMetadata writeIndex = metadata.index(ia.getWriteIndex()); String[] aliasNames = writeIndex.getAliases().keySet().stream().sorted().toArray(String[]::new); - List attributes = new ArrayList<>(); attributes.add(writeIndex.getState() == IndexMetadata.State.OPEN ? "open" : "closed"); if (ia.isHidden()) { @@ -583,7 +582,6 @@ private static void enrichIndexAbstraction( attributes.add("frozen"); } attributes.sort(String::compareTo); - indices.add( new ResolvedIndex( ia.getName(), @@ -592,13 +590,13 @@ private static void enrichIndexAbstraction( ia.getParentDataStream() == null ? 
null : ia.getParentDataStream().getName() ) ); - break; - case ALIAS: + } + case ALIAS -> { String[] indexNames = ia.getIndices().stream().map(Index::getName).toArray(String[]::new); Arrays.sort(indexNames); aliases.add(new ResolvedAlias(ia.getName(), indexNames)); - break; - case DATA_STREAM: + } + case DATA_STREAM -> { IndexAbstraction.DataStream dataStream = (IndexAbstraction.DataStream) ia; String[] backingIndices = dataStream.getIndices().stream().map(Index::getName).toArray(String[]::new); dataStreams.add( @@ -608,9 +606,8 @@ private static void enrichIndexAbstraction( dataStream.getDataStream().getTimeStampField().getName() ) ); - break; - default: - throw new IllegalStateException("unknown index abstraction type: " + ia.getType()); + } + default -> throw new IllegalStateException("unknown index abstraction type: " + ia.getType()); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index e1daba9905262..e186b737808e4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -70,32 +70,10 @@ public String name() { /** * Holder for index stats used to evaluate conditions */ - public static class Stats { - public final long numDocs; - public final long indexCreated; - public final ByteSizeValue indexSize; - public final ByteSizeValue maxPrimaryShardSize; - public final long maxShardDocs; - - public Stats(long numDocs, long indexCreated, ByteSizeValue indexSize, ByteSizeValue maxPrimaryShardSize, long maxShardDocs) { - this.numDocs = numDocs; - this.indexCreated = indexCreated; - this.indexSize = indexSize; - this.maxPrimaryShardSize = maxPrimaryShardSize; - this.maxShardDocs = maxShardDocs; - } - } + public record Stats(long numDocs, long indexCreated, ByteSizeValue indexSize, ByteSizeValue maxPrimaryShardSize, long maxShardDocs) {} /** * Holder for evaluated condition result */ - public static class Result { - public final Condition condition; - public final boolean matched; - - protected Result(Condition condition, boolean matched) { - this.condition = condition; - this.matched = matched; - } - } + public record Result(Condition condition, boolean matched) {} } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java index 2da69d3349b96..22d68feecf60c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java @@ -35,7 +35,7 @@ public MaxAgeCondition(StreamInput in) throws IOException { @Override public Result evaluate(final Stats stats) { - long indexAge = System.currentTimeMillis() - stats.indexCreated; + long indexAge = System.currentTimeMillis() - stats.indexCreated(); return new Result(this, this.value.getMillis() <= indexAge); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java index efe304d64a146..0348190eebdef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java +++ 
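With Condition.Stats and Condition.Result turned into records, their data is reached through generated accessor methods instead of public final fields, which is why each condition's evaluate method changes from stats.indexCreated to stats.indexCreated() and so on below. A small sketch of the before/after shape, with illustrative names:

    // Sketch: a record exposes components via methods, not fields.
    record Stats(long numDocs, long indexCreated) {}

    static boolean maxDocsReached(Stats stats, long maxDocs) {
        return maxDocs <= stats.numDocs(); // was a field read: stats.numDocs
    }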
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java @@ -34,7 +34,7 @@ public MaxDocsCondition(StreamInput in) throws IOException { @Override public Result evaluate(final Stats stats) { - return new Result(this, this.value <= stats.numDocs); + return new Result(this, this.value <= stats.numDocs()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java index df69d0563df41..131b721965fc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java @@ -36,7 +36,7 @@ public MaxPrimaryShardSizeCondition(StreamInput in) throws IOException { @Override public Result evaluate(Stats stats) { - return new Result(this, stats.maxPrimaryShardSize.getBytes() >= value.getBytes()); + return new Result(this, stats.maxPrimaryShardSize().getBytes() >= value.getBytes()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxShardDocsCondition.java index e258da4bc2f94..6de2f29f92b16 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxShardDocsCondition.java @@ -30,7 +30,7 @@ public MaxShardDocsCondition(StreamInput in) throws IOException { @Override public Result evaluate(Stats stats) { - return new Result(this, this.value <= stats.maxShardDocs); + return new Result(this, this.value <= stats.maxShardDocs()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java index 9ed097b4e99d0..8284c4c0f4c45 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -36,7 +36,7 @@ public MaxSizeCondition(StreamInput in) throws IOException { @Override public Result evaluate(Stats stats) { - return new Result(this, stats.indexSize.getBytes() >= value.getBytes()); + return new Result(this, stats.indexSize().getBytes() >= value.getBytes()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index e7b148c9a179d..4f08d6a50eda4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -35,6 +35,7 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; +import java.time.Instant; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -60,7 +61,6 @@ public class MetadataRolloverService { private final ThreadPool threadPool; private final MetadataCreateIndexService createIndexService; private final MetadataIndexAliasesService indexAliasesService; - private final IndexNameExpressionResolver 
indexNameExpressionResolver; private final SystemIndices systemIndices; @Inject @@ -68,26 +68,15 @@ public MetadataRolloverService( ThreadPool threadPool, MetadataCreateIndexService createIndexService, MetadataIndexAliasesService indexAliasesService, - IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices ) { this.threadPool = threadPool; this.createIndexService = createIndexService; this.indexAliasesService = indexAliasesService; - this.indexNameExpressionResolver = indexNameExpressionResolver; this.systemIndices = systemIndices; } - public static class RolloverResult { - public final String rolloverIndexName; - public final String sourceIndexName; - public final ClusterState clusterState; - - private RolloverResult(String rolloverIndexName, String sourceIndexName, ClusterState clusterState) { - this.rolloverIndexName = rolloverIndexName; - this.sourceIndexName = sourceIndexName; - this.clusterState = clusterState; - } + public record RolloverResult(String rolloverIndexName, String sourceIndexName, ClusterState clusterState) { @Override public String toString() { @@ -107,37 +96,37 @@ public RolloverResult rolloverClusterState( String newIndexName, CreateIndexRequest createIndexRequest, List> metConditions, + Instant now, boolean silent, boolean onlyValidate ) throws Exception { validate(currentState.metadata(), rolloverTarget, newIndexName, createIndexRequest); final IndexAbstraction indexAbstraction = currentState.metadata().getIndicesLookup().get(rolloverTarget); - switch (indexAbstraction.getType()) { - case ALIAS: - return rolloverAlias( - currentState, - (IndexAbstraction.Alias) indexAbstraction, - rolloverTarget, - newIndexName, - createIndexRequest, - metConditions, - silent, - onlyValidate - ); - case DATA_STREAM: - return rolloverDataStream( - currentState, - (IndexAbstraction.DataStream) indexAbstraction, - rolloverTarget, - createIndexRequest, - metConditions, - silent, - onlyValidate - ); - default: + return switch (indexAbstraction.getType()) { + case ALIAS -> rolloverAlias( + currentState, + (IndexAbstraction.Alias) indexAbstraction, + rolloverTarget, + newIndexName, + createIndexRequest, + metConditions, + silent, + onlyValidate + ); + case DATA_STREAM -> rolloverDataStream( + currentState, + (IndexAbstraction.DataStream) indexAbstraction, + rolloverTarget, + createIndexRequest, + metConditions, + now, + silent, + onlyValidate + ); + default -> // the validate method above prevents this case throw new IllegalStateException("unable to roll over type [" + indexAbstraction.getType().getDisplayName() + "]"); - } + }; } public void validateIndexName(ClusterState state, String index) { @@ -155,39 +144,24 @@ public NameResolution resolveRolloverNames( ) { validate(currentState.metadata(), rolloverTarget, newIndexName, createIndexRequest); final IndexAbstraction indexAbstraction = currentState.metadata().getIndicesLookup().get(rolloverTarget); - switch (indexAbstraction.getType()) { - case ALIAS: - return resolveAliasRolloverNames(currentState.metadata(), indexAbstraction, newIndexName); - case DATA_STREAM: - return resolveDataStreamRolloverNames(currentState.getMetadata(), (IndexAbstraction.DataStream) indexAbstraction); - default: + return switch (indexAbstraction.getType()) { + case ALIAS -> resolveAliasRolloverNames(currentState.metadata(), indexAbstraction, newIndexName); + case DATA_STREAM -> resolveDataStreamRolloverNames(currentState.getMetadata(), (IndexAbstraction.DataStream) indexAbstraction); + default -> // the validate method above 
prevents this case throw new IllegalStateException("unable to roll over type [" + indexAbstraction.getType().getDisplayName() + "]"); - } + }; } - public static class NameResolution { - final String sourceName; - @Nullable - final String unresolvedName; - final String rolloverName; - - NameResolution(String sourceName, String unresolvedName, String rolloverName) { - this.sourceName = sourceName; - this.unresolvedName = unresolvedName; - this.rolloverName = rolloverName; - } - } + public record NameResolution(String sourceName, @Nullable String unresolvedName, String rolloverName) {} private NameResolution resolveAliasRolloverNames(Metadata metadata, IndexAbstraction alias, String newIndexName) { final IndexMetadata writeIndex = metadata.index(alias.getWriteIndex()); final String sourceProvidedName = writeIndex.getSettings() .get(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, writeIndex.getIndex().getName()); final String sourceIndexName = writeIndex.getIndex().getName(); - final String unresolvedName = (newIndexName != null) - ? newIndexName - : generateRolloverIndexName(sourceProvidedName, indexNameExpressionResolver); - final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName); + final String unresolvedName = (newIndexName != null) ? newIndexName : generateRolloverIndexName(sourceProvidedName); + final String rolloverIndexName = IndexNameExpressionResolver.resolveDateMathExpression(unresolvedName); return new NameResolution(sourceIndexName, unresolvedName, rolloverIndexName); } @@ -252,6 +226,7 @@ private RolloverResult rolloverDataStream( String dataStreamName, CreateIndexRequest createIndexRequest, List> metConditions, + Instant now, boolean silent, boolean onlyValidate ) throws Exception { @@ -292,7 +267,8 @@ private RolloverResult rolloverDataStream( dataStreamName, newWriteIndexName, createIndexRequest, - systemDataStreamDescriptor + systemDataStreamDescriptor, + now ); ClusterState newState = createIndexService.applyCreateIndexRequest( currentState, @@ -312,8 +288,8 @@ private RolloverResult rolloverDataStream( return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } - static String generateRolloverIndexName(String sourceIndexName, IndexNameExpressionResolver indexNameExpressionResolver) { - String resolvedName = indexNameExpressionResolver.resolveDateMathExpression(sourceIndexName); + static String generateRolloverIndexName(String sourceIndexName) { + String resolvedName = IndexNameExpressionResolver.resolveDateMathExpression(sourceIndexName); final boolean isDateMath = sourceIndexName.equals(resolvedName) == false; if (INDEX_NAME_PATTERN.matcher(resolvedName).matches()) { int numberIndex = sourceIndexName.lastIndexOf("-"); @@ -335,11 +311,13 @@ static CreateIndexClusterStateUpdateRequest prepareDataStreamCreateIndexRequest( final String dataStreamName, final String targetIndexName, CreateIndexRequest createIndexRequest, - final SystemDataStreamDescriptor descriptor + final SystemDataStreamDescriptor descriptor, + Instant now ) { Settings settings = descriptor != null ? 
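rolloverClusterState and resolveRolloverNames now return a switch expression: each arrow arm yields a value or throws, nothing falls through, and the compiler verifies that every path produces a result. The added Instant now parameter, threaded down to nameResolvedInstant, means the caller supplies a single clock reading (TransportRolloverAction passes Instant.now(), as seen further down) instead of each layer consulting the clock separately. A self-contained sketch of the switch-expression shape, with made-up cases:

    // Sketch: an exhaustive switch expression over an enum needs no default arm.
    enum Kind { ALIAS, DATA_STREAM }

    static String describe(Kind kind) {
        return switch (kind) {
            case ALIAS -> "roll over the alias write index";
            case DATA_STREAM -> "roll over the data stream write index";
        };
    }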
Settings.EMPTY : HIDDEN_INDEX_SETTINGS; return prepareCreateIndexRequest(targetIndexName, targetIndexName, "rollover_data_stream", createIndexRequest, settings) .dataStreamName(dataStreamName) + .nameResolvedInstant(now.toEpochMilli()) .systemDataStreamDescriptor(descriptor); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index b1455bb6fe583..53876d9374688 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.MapperService; @@ -24,7 +25,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; @@ -128,7 +128,7 @@ public class RolloverRequest extends AcknowledgedRequest implem private String rolloverTarget; private String newIndexName; private boolean dryRun; - private final Map> conditions = new HashMap<>(2); + private final Map> conditions = Maps.newMapWithExpectedSize(2); // the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java index d3919a5999ec4..68d653eb95929 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 0e0e765f77ba5..9c37addf7b554 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -11,12 +11,12 @@ import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.HashMap; import 
java.util.Map; import java.util.Objects; @@ -48,7 +48,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement oldIndex = in.readString(); newIndex = in.readString(); int conditionSize = in.readVInt(); - conditionStatus = new HashMap<>(conditionSize); + conditionStatus = Maps.newMapWithExpectedSize(conditionSize); for (int i = 0; i < conditionSize; i++) { conditionStatus.put(in.readString(), in.readBoolean()); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 7f363556060bb..570c58dd40144 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.action.support.ActiveShardsObserver; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; @@ -43,6 +43,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.time.Instant; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -57,7 +58,6 @@ public class TransportRolloverAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportRolloverAction.class); - private static final ClusterStateTaskConfig ROLLOVER_TASK_CONFIG = ClusterStateTaskConfig.build(Priority.NORMAL); private final MetadataRolloverService rolloverService; private final ActiveShardsObserver activeShardsObserver; @@ -141,8 +141,8 @@ protected void masterOperation( rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest() ); - final String trialSourceIndexName = trialRolloverNames.sourceName; - final String trialRolloverIndexName = trialRolloverNames.rolloverName; + final String trialSourceIndexName = trialRolloverNames.sourceName(); + final String trialRolloverIndexName = trialRolloverNames.rolloverName(); rolloverService.validateIndexName(oldState, trialRolloverIndexName); @@ -180,7 +180,8 @@ protected void masterOperation( if (trialConditionResults.size() == 0 || trialMetConditions.size() > 0) { String source = "rollover_index source [" + trialRolloverIndexName + "] to target [" + trialRolloverIndexName + "]"; RolloverTask rolloverTask = new RolloverTask(rolloverRequest, statsResponse, trialRolloverResponse, listener); - clusterService.submitStateUpdateTask(source, rolloverTask, ROLLOVER_TASK_CONFIG, rolloverTaskExecutor, rolloverTask); + ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.NORMAL, rolloverRequest.masterNodeTimeout()); + clusterService.submitStateUpdateTask(source, rolloverTask, config, rolloverTaskExecutor); } else { // conditions not met listener.onResponse(trialRolloverResponse); @@ -195,7 +196,7 @@ static Map evaluateConditions(final Collection> co if (stats != null) { return conditions.stream() .map(condition -> condition.evaluate(stats)) - .collect(Collectors.toMap(result -> result.condition.toString(), result -> result.matched)); + .collect(Collectors.toMap(result -> 
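Both rollover classes swap new HashMap<>(n) for Maps.newMapWithExpectedSize(n). HashMap's int constructor sets the initial capacity, not the number of entries that fit: with the default load factor of 0.75, a map built with capacity n rehashes before reaching n entries. A sketch of what such a helper plausibly computes; the formula here is an assumption about the ES utility, shown only to convey the idea:

    import java.util.HashMap;

    // Sketch: size the table so 'expectedSize' entries fit without a rehash.
    static <K, V> HashMap<K, V> withExpectedSize(int expectedSize) {
        return new HashMap<>((int) (expectedSize / 0.75f) + 1); // assumed formula
    }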
result.condition().toString(), Condition.Result::matched)); } else { // no conditions matched return conditions.stream().collect(Collectors.toMap(Condition::toString, cond -> false)); @@ -277,7 +278,7 @@ ClusterState performRollover(ClusterState currentState) throws Exception { rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest() ); - final String sourceIndexName = rolloverNames.sourceName; + final String sourceIndexName = rolloverNames.sourceName(); // Re-evaluate the conditions, now with our final source index name final Map postConditionResults = evaluateConditions( @@ -301,6 +302,7 @@ ClusterState performRollover(ClusterState currentState) throws Exception { rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), metConditions, + Instant.now(), false, false ); @@ -311,11 +313,11 @@ ClusterState performRollover(ClusterState currentState) throws Exception { // even though we're single threaded, it's possible for the // rollover names generated before the actual rollover to be // different due to things like date resolution - sourceIndex.set(rolloverResult.sourceIndexName); - rolloverIndex.set(rolloverResult.rolloverIndexName); + sourceIndex.set(rolloverResult.sourceIndexName()); + rolloverIndex.set(rolloverResult.rolloverIndexName()); // Return the new rollover cluster state, which includes the changes that create the new index - return rolloverResult.clusterState; + return rolloverResult.clusterState(); } else { // Upon re-evaluation of the conditions, none were met, so // therefore do not perform a rollover, returning the current @@ -325,12 +327,12 @@ ClusterState performRollover(ClusterState currentState) throws Exception { } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { listener.onFailure(e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { // Now assuming we have a new state and the name of the rolled over index, we need to wait for the // configured number of active shards, as well as return the names of the indices that were rolled/created if (clusterStateProcessed) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java index afe0e4855f065..9c100fd7e2ee9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java @@ -8,36 +8,19 @@ package org.elasticsearch.action.admin.indices.segments; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.index.shard.ShardId; -import java.util.Arrays; import java.util.Iterator; -public class IndexShardSegments implements Iterable { - - private final ShardId shardId; - - private final ShardSegments[] shards; - - IndexShardSegments(ShardId shardId, ShardSegments[] shards) { - this.shardId = shardId; - this.shards = shards; - } - - public ShardId getShardId() { - return this.shardId; - } +public record IndexShardSegments(ShardId shardId, ShardSegments[] shards) implements Iterable { public ShardSegments getAt(int i) { return shards[i]; } - public ShardSegments[] getShards() { - return this.shards; - } - @Override public Iterator iterator() { - return Arrays.stream(shards).iterator(); + return 
Iterators.forArray(shards); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 165ed7c58c94b..9dfe66603173d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -87,7 +87,7 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.startObject(Fields.SHARDS); for (IndexShardSegments indexSegment : indexSegments) { - builder.startArray(Integer.toString(indexSegment.getShardId().id())); + builder.startArray(Integer.toString(indexSegment.shardId().id())); for (ShardSegments shardSegments : indexSegment) { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java index cead1ff441879..908564071f39c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.segments; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesSegmentsRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java index ab663dd408dc7..f3e560daf3772 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder< diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index f2b512c86e402..f0560e5379548 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.admin.indices.settings.get; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -109,14 +107,9 @@ private static void parseSettingsField( if (parser.currentToken() == XContentParser.Token.START_OBJECT) { switch 
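IndexShardSegments.iterator() previously spun up a whole Stream pipeline (Arrays.stream(shards).iterator()) just to walk an array; Iterators.forArray yields a plain index-based iterator instead. A self-contained sketch of what such an array iterator amounts to; this is an illustration, not ES's implementation:

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // Sketch: a minimal array-backed iterator.
    static <T> Iterator<T> forArray(T[] array) {
        return new Iterator<>() {
            private int index = 0;

            @Override
            public boolean hasNext() {
                return index < array.length;
            }

            @Override
            public T next() {
                if (hasNext() == false) {
                    throw new NoSuchElementException();
                }
                return array[index++];
            }
        };
    }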
(parser.currentName()) { - case "settings": - indexToSettings.put(currentIndexName, Settings.fromXContent(parser)); - break; - case "defaults": - indexToDefaultSettings.put(currentIndexName, Settings.fromXContent(parser)); - break; - default: - parser.skipChildren(); + case "settings" -> indexToSettings.put(currentIndexName, Settings.fromXContent(parser)); + case "defaults" -> indexToDefaultSettings.put(currentIndexName, Settings.fromXContent(parser)); + default -> parser.skipChildren(); } } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) { parser.skipChildren(); @@ -184,18 +177,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws private XContentBuilder toXContent(XContentBuilder builder, Params params, boolean omitEmptySettings) throws IOException { builder.startObject(); - for (ObjectObjectCursor cursor : getIndexToSettings()) { + for (Map.Entry cursor : getIndexToSettings().entrySet()) { // no settings, jump over it to shorten the response data - if (omitEmptySettings && cursor.value.isEmpty()) { + if (omitEmptySettings && cursor.getValue().isEmpty()) { continue; } - builder.startObject(cursor.key); + builder.startObject(cursor.getKey()); builder.startObject("settings"); - cursor.value.toXContent(builder, params); + cursor.getValue().toXContent(builder, params); builder.endObject(); if (indexToDefaultSettings.isEmpty() == false) { builder.startObject("defaults"); - indexToDefaultSettings.get(cursor.key).toXContent(builder, params); + indexToDefaultSettings.get(cursor.getKey()).toXContent(builder, params); builder.endObject(); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index f2e174c092dc0..529e3bd8a8bae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -39,6 +39,8 @@ import java.util.Objects; import java.util.stream.Collectors; +import static org.elasticsearch.indices.SystemIndexManager.MANAGED_SYSTEM_INDEX_SETTING_UPDATE_ALLOWLIST; + public class TransportUpdateSettingsAction extends AcknowledgedTransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportUpdateSettingsAction.class); @@ -109,10 +111,10 @@ protected void masterOperation( return; } - final List hiddenSystemIndexViolations = checkForHidingSystemIndex(concreteIndices, request); - if (hiddenSystemIndexViolations.isEmpty() == false) { - final String message = "Cannot set [index.hidden] to 'true' on system indices: " - + hiddenSystemIndexViolations.stream().map(entry -> "[" + entry + "]").collect(Collectors.joining(", ")); + final List unhiddenSystemIndexViolations = checkForUnhidingSystemIndex(concreteIndices, request); + if (unhiddenSystemIndexViolations.isEmpty() == false) { + final String message = "Cannot set [index.hidden] to 'false' on system indices: " + + unhiddenSystemIndexViolations.stream().map(entry -> "[" + entry + "]").collect(Collectors.joining(", ")); logger.warn(message); listener.onFailure(new IllegalStateException(message)); return; @@ -156,6 +158,10 @@ private Map> checkForSystemIndexViolations(Index[] concrete final Settings descriptorSettings = descriptor.getSettings(); List failedKeys = new ArrayList<>(); for (String key : 
requestSettings.keySet()) { + if (MANAGED_SYSTEM_INDEX_SETTING_UPDATE_ALLOWLIST.contains(key)) { + // Don't check the setting if it's on the allowlist. + continue; + } final String expectedValue = descriptorSettings.get(key); final String actualValue = requestSettings.get(key); @@ -174,17 +180,17 @@ } /** - * Checks that the request isn't trying to add the "hidden" setting to a system + * Checks that the request isn't trying to remove the "hidden" setting on a system * index * * @param concreteIndices the indices being updated * @param request the update request - * @return a list of system indexes that this request would set to hidden + * @return a list of system indexes that this request would make visible */ - private List checkForHidingSystemIndex(Index[] concreteIndices, UpdateSettingsRequest request) { + private List checkForUnhidingSystemIndex(Index[] concreteIndices, UpdateSettingsRequest request) { // Requests that a cluster generates itself are permitted to have a difference in settings // so that rolling upgrade scenarios still work. We check this via the request's origin. - if (request.settings().getAsBoolean(IndexMetadata.SETTING_INDEX_HIDDEN, false) == false) { + if (request.settings().getAsBoolean(IndexMetadata.SETTING_INDEX_HIDDEN, true)) { return List.of(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 1e64c26f91b49..c032d062bfe4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentType; @@ -83,4 +83,12 @@ public UpdateSettingsRequestBuilder setPreserveExisting(boolean preserveExisting request.setPreserveExisting(preserveExisting); return this; } + + /** + * Sets the origin to use; only set this when the settings update is requested by ES internal processes.
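The guard in checkForUnhidingSystemIndex is the mirror image of the old one: system indices are supposed to stay hidden, so the dangerous request is one that sets index.hidden to false. Reading the setting with a default of true lets an absent setting take the early return, and only an explicit false proceeds to the violation scan. A self-contained restatement of the condition, using a plain map in place of Settings:

    import java.util.Map;

    // Sketch: only an explicit "false" counts as an attempt to unhide.
    static boolean requestUnhidesIndex(Map<String, String> requestSettings) {
        return Boolean.parseBoolean(requestSettings.getOrDefault("index.hidden", "true")) == false;
    }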
+ */ + public UpdateSettingsRequestBuilder origin(String origin) { + request.origin(origin); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java index 1be7faa591cb7..a5c6c27964a9e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index ecc1f76394edc..efa81c72615ce 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -8,9 +8,6 @@ package org.elasticsearch.action.admin.indices.shards; -import com.carrotsearch.hppc.cursors.IntObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; @@ -26,6 +23,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Map; /** * Response for {@link IndicesShardStoresAction} @@ -71,29 +69,21 @@ public enum AllocationStatus { } private static AllocationStatus fromId(byte id) { - switch (id) { - case 0: - return PRIMARY; - case 1: - return REPLICA; - case 2: - return UNUSED; - default: - throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); - } + return switch (id) { + case 0 -> PRIMARY; + case 1 -> REPLICA; + case 2 -> UNUSED; + default -> throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); + }; } public String value() { - switch (id) { - case 0: - return "primary"; - case 1: - return "replica"; - case 2: - return "unused"; - default: - throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); - } + return switch (id) { + case 0 -> "primary"; + case 1 -> "replica"; + case 2 -> "unused"; + default -> throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); + }; } private static AllocationStatus readFrom(StreamInput in) throws IOException { @@ -290,9 +280,9 @@ public List getFailures() { public void writeTo(StreamOutput out) throws IOException { out.writeMap(storeStatuses, StreamOutput::writeString, (o, v) -> { o.writeVInt(v.size()); - for (IntObjectCursor> shardStatusesEntry : v) { - o.writeInt(shardStatusesEntry.key); - o.writeCollection(shardStatusesEntry.value); + for (Map.Entry> shardStatusesEntry : v.entrySet()) { + o.writeInt(shardStatusesEntry.getKey()); + o.writeCollection(shardStatusesEntry.getValue()); } }); out.writeList(failures); @@ -309,14 +299,14 @@ public XContentBuilder 
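IndicesShardStoresResponse, like GetSettingsResponse and GetMappingsResponse earlier in this diff, drops the com.carrotsearch.hppc cursor types: ObjectObjectCursor and IntObjectCursor loops become plain java.util.Map.Entry loops, so cursor.key and cursor.value turn into entry.getKey() and entry.getValue(). A minimal sketch of the replacement shape:

    import java.util.Map;

    // Sketch: the JDK entry loop that replaces HPPC cursor iteration.
    static int totalValueLength(Map<String, String> map) {
        int total = 0;
        for (Map.Entry<String, String> entry : map.entrySet()) {
            total += entry.getValue().length(); // was: cursor.value
        }
        return total;
    }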
toXContent(XContentBuilder builder, Params params) throws } builder.startObject(Fields.INDICES); - for (ObjectObjectCursor>> indexShards : storeStatuses) { - builder.startObject(indexShards.key); + for (Map.Entry>> indexShards : storeStatuses.entrySet()) { + builder.startObject(indexShards.getKey()); builder.startObject(Fields.SHARDS); - for (IntObjectCursor> shardStatusesEntry : indexShards.value) { - builder.startObject(String.valueOf(shardStatusesEntry.key)); + for (Map.Entry> shardStatusesEntry : indexShards.getValue().entrySet()) { + builder.startObject(String.valueOf(shardStatusesEntry.getKey())); builder.startArray(Fields.STORES); - for (StoreStatus storeStatus : shardStatusesEntry.value) { + for (StoreStatus storeStatus : shardStatusesEntry.getValue()) { builder.startObject(); storeStatus.toXContent(builder, params); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 9beb6dd727caf..90715e8c879ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -15,7 +15,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.action.support.nodes.BaseNodesResponse; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java index b0670d5214c5d..172f84e15d700 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index efea8a1e9c701..173a0f7b2bb9d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -20,13 +20,14 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -115,8 +116,8 @@ protected void masterOperation( ) { // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code - final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); - final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); + final String sourceIndex = IndexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); + final String targetIndex = IndexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); final IndexMetadata sourceMetadata = state.metadata().index(sourceIndex); if (sourceMetadata == null) { @@ -124,6 +125,11 @@ protected void masterOperation( return; } + // Index splits are not allowed for time-series indices + if (resizeRequest.getResizeType() == ResizeType.SPLIT) { + IndexRouting.fromIndexMetadata(sourceMetadata).checkIndexSplitAllowed(); + } + IndicesStatsRequestBuilder statsRequestBuilder = client.admin() .indices() .prepareStats(sourceIndex) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 999d516cb91d8..e12812bb8c311 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -109,62 +109,25 @@ public CommonStats(CommonStatsFlags flags) { for (CommonStatsFlags.Flag flag : setFlags) { switch (flag) { - case Docs: - docs = new DocsStats(); - break; - case Store: - store = new StoreStats(); - break; - case Indexing: - indexing = new IndexingStats(); - break; - case Get: - get = new GetStats(); - break; - case Search: - search = new SearchStats(); - break; - case Merge: - merge = new MergeStats(); - break; - case Refresh: - refresh = new RefreshStats(); - break; - case Flush: - flush = new FlushStats(); - break; - case Warmer: - warmer = new WarmerStats(); - break; - case QueryCache: - queryCache = new QueryCacheStats(); - break; - case FieldData: - fieldData = new FieldDataStats(); - break; - case Completion: - completion = new CompletionStats(); - break; - case Segments: - segments = new SegmentsStats(); - break; - case Translog: - translog = new TranslogStats(); - break; - case RequestCache: - requestCache = new RequestCacheStats(); - break; - case Recovery: - recoveryStats = new RecoveryStats(); - break; - case Bulk: - bulk = new BulkStats(); - break; - case Shards: - shards = new ShardCountStats(); - break; - default: - throw new IllegalStateException("Unknown Flag: " + flag); + case Docs -> docs = new DocsStats(); + case Store -> store = new StoreStats(); + case Indexing -> indexing = new IndexingStats(); + case Get -> get = new GetStats(); + case Search -> search = new SearchStats(); + case Merge -> merge = new MergeStats(); + case Refresh 
-> refresh = new RefreshStats(); + case Flush -> flush = new FlushStats(); + case Warmer -> warmer = new WarmerStats(); + case QueryCache -> queryCache = new QueryCacheStats(); + case FieldData -> fieldData = new FieldDataStats(); + case Completion -> completion = new CompletionStats(); + case Segments -> segments = new SegmentsStats(); + case Translog -> translog = new TranslogStats(); + case RequestCache -> requestCache = new RequestCacheStats(); + case Recovery -> recoveryStats = new RecoveryStats(); + case Bulk -> bulk = new BulkStats(); + case Shards -> shards = new ShardCountStats(); + default -> throw new IllegalStateException("Unknown Flag: " + flag); } } } @@ -174,63 +137,27 @@ public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, C for (CommonStatsFlags.Flag flag : setFlags) { try { switch (flag) { - case Docs: - docs = indexShard.docStats(); - break; - case Store: - store = indexShard.storeStats(); - break; - case Indexing: - indexing = indexShard.indexingStats(); - break; - case Get: - get = indexShard.getStats(); - break; - case Search: - search = indexShard.searchStats(flags.groups()); - break; - case Merge: - merge = indexShard.mergeStats(); - break; - case Refresh: - refresh = indexShard.refreshStats(); - break; - case Flush: - flush = indexShard.flushStats(); - break; - case Warmer: - warmer = indexShard.warmerStats(); - break; - case QueryCache: - queryCache = indicesQueryCache.getStats(indexShard.shardId()); - break; - case FieldData: - fieldData = indexShard.fieldDataStats(flags.fieldDataFields()); - break; - case Completion: - completion = indexShard.completionStats(flags.completionDataFields()); - break; - case Segments: - segments = indexShard.segmentStats(flags.includeSegmentFileSizes(), flags.includeUnloadedSegments()); - break; - case Translog: - translog = indexShard.translogStats(); - break; - case RequestCache: - requestCache = indexShard.requestCache().stats(); - break; - case Recovery: - recoveryStats = indexShard.recoveryStats(); - break; - case Bulk: - bulk = indexShard.bulkStats(); - break; - case Shards: + case Docs -> docs = indexShard.docStats(); + case Store -> store = indexShard.storeStats(); + case Indexing -> indexing = indexShard.indexingStats(); + case Get -> get = indexShard.getStats(); + case Search -> search = indexShard.searchStats(flags.groups()); + case Merge -> merge = indexShard.mergeStats(); + case Refresh -> refresh = indexShard.refreshStats(); + case Flush -> flush = indexShard.flushStats(); + case Warmer -> warmer = indexShard.warmerStats(); + case QueryCache -> queryCache = indicesQueryCache.getStats(indexShard.shardId()); + case FieldData -> fieldData = indexShard.fieldDataStats(flags.fieldDataFields()); + case Completion -> completion = indexShard.completionStats(flags.completionDataFields()); + case Segments -> segments = indexShard.segmentStats(flags.includeSegmentFileSizes(), flags.includeUnloadedSegments()); + case Translog -> translog = indexShard.translogStats(); + case RequestCache -> requestCache = indexShard.requestCache().stats(); + case Recovery -> recoveryStats = indexShard.recoveryStats(); + case Bulk -> bulk = indexShard.bulkStats(); + case Shards -> // Setting to 1 because the single IndexShard passed to this method implies 1 shard shards = new ShardCountStats(1); - break; - default: - throw new IllegalStateException("Unknown Flag: " + flag); + default -> throw new IllegalStateException("Unknown Flag: " + flag); } } catch (AlreadyClosedException e) { // shard is closed - no stats is fine
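The CommonStats hunks above are a mechanical conversion from colon-form switch statements to the arrow form of switch (standardized in Java 14), which cannot fall through and, when used as an expression, is checked for exhaustiveness by the compiler. A minimal self-contained sketch of the pattern follows; the enum and method names are illustrative stand-ins, not the classes from this patch.

// SwitchConversionSketch.java - illustrative sketch only, not part of the patch
public class SwitchConversionSketch {
    enum Flag { DOCS, STORE, INDEXING }

    // Before: colon labels need explicit breaks, and a forgotten break falls through.
    static String describeOld(Flag flag) {
        String result;
        switch (flag) {
            case DOCS:
                result = "docs";
                break;
            case STORE:
                result = "store";
                break;
            case INDEXING:
                result = "indexing";
                break;
            default:
                throw new IllegalStateException("Unknown Flag: " + flag);
        }
        return result;
    }

    // After: arrow labels cannot fall through; as an expression over an enum that
    // covers every constant, the compiler enforces exhaustiveness, so no default is needed.
    static String describeNew(Flag flag) {
        return switch (flag) {
            case DOCS -> "docs";
            case STORE -> "store";
            case INDEXING -> "indexing";
        };
    }
}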
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java index 5b52a7209ef71..18a9c9d1ba27a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java @@ -8,13 +8,13 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class IndexShardStats implements Iterable, Writeable { @@ -47,7 +47,7 @@ public ShardStats getAt(int position) { @Override public Iterator iterator() { - return Arrays.stream(shards).iterator(); + return Iterators.forArray(shards); } private CommonStats total = null; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java index 89ff910faebf0..5affc6a9f39e4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java @@ -8,6 +8,10 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.core.Nullable; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -20,11 +24,23 @@ public class IndexStats implements Iterable { private final String uuid; + private final ClusterHealthStatus health; + + private final IndexMetadata.State state; + private final ShardStats shards[]; - public IndexStats(String index, String uuid, ShardStats[] shards) { + public IndexStats( + String index, + String uuid, + @Nullable ClusterHealthStatus health, + @Nullable IndexMetadata.State state, + ShardStats[] shards + ) { this.index = index; this.uuid = uuid; + this.health = health; + this.state = state; this.shards = shards; } @@ -36,6 +52,14 @@ public String getUuid() { return uuid; } + public ClusterHealthStatus getHealth() { + return health; + } + + public IndexMetadata.State getState() { + return state; + } + public ShardStats[] getShards() { return this.shards; } @@ -48,11 +72,7 @@ public Map getIndexShards() { } Map> tmpIndexShards = new HashMap<>(); for (ShardStats shard : shards) { - List lst = tmpIndexShards.get(shard.getShardRouting().id()); - if (lst == null) { - lst = new ArrayList<>(); - tmpIndexShards.put(shard.getShardRouting().id(), lst); - } + List lst = tmpIndexShards.computeIfAbsent(shard.getShardRouting().id(), ignored -> new ArrayList<>()); lst.add(shard); } indexShards = new HashMap<>(); @@ -106,11 +126,15 @@ public CommonStats getPrimaries() { public static class IndexStatsBuilder { private final String indexName; private final String uuid; + private final ClusterHealthStatus health; + private final IndexMetadata.State state; private final List shards = new ArrayList<>(); - public IndexStatsBuilder(String indexName, String uuid) { + public IndexStatsBuilder(String indexName, String uuid, @Nullable ClusterHealthStatus health, @Nullable IndexMetadata.State state) { this.indexName = indexName; this.uuid = uuid; + this.health = health; + this.state = state; } public IndexStatsBuilder add(ShardStats shardStats) { @@ -119,7 +143,7 @@ public IndexStatsBuilder add(ShardStats shardStats) { } public IndexStats build() { - return new IndexStats(indexName, uuid, health, state, shards.toArray(new ShardStats[shards.size()])); } } }
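The getIndexShards() hunk above swaps the get / null-check / put sequence for Map.computeIfAbsent, which creates the bucket on first access in a single call. A small standalone sketch of the idiom, with toy types standing in for ShardStats and the shard routing id:

// GroupingSketch.java - illustrative sketch only, not part of the patch
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupingSketch {
    static Map<Integer, List<String>> groupByShardId(List<String> shardNames) {
        Map<Integer, List<String>> byId = new HashMap<>();
        for (String name : shardNames) {
            int id = Math.floorMod(name.hashCode(), 4); // stand-in for shard.getShardRouting().id()
            // One call replaces: get, null check, new ArrayList, put.
            byId.computeIfAbsent(id, ignored -> new ArrayList<>()).add(name);
        }
        return byId;
    }
}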
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index 042cd28cbf54f..0cc3c45b60d75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * A request to get indices level stats. Allow to enable different stats to be returned. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 7b4b14c606aaa..25c804a340a72 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -8,9 +8,14 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -21,20 +26,33 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; public class IndicesStatsResponse extends BroadcastResponse { - private ShardStats[] shards; + private final Map indexHealthMap; + + private final Map indexStateMap; + + private final ShardStats[] shards; private Map shardStatsMap; IndicesStatsResponse(StreamInput in) throws IOException { super(in); - shards = in.readArray(ShardStats::new, (size) -> new ShardStats[size]); + shards = in.readArray(ShardStats::new, ShardStats[]::new); + if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + indexHealthMap = in.readMap(StreamInput::readString, ClusterHealthStatus::readFrom); + indexStateMap = in.readMap(StreamInput::readString, IndexMetadata.State::readFrom); + } else { + indexHealthMap = Map.of(); + indexStateMap = Map.of(); + } } IndicesStatsResponse( @@ -42,10 +60,28 @@ public class IndicesStatsResponse extends BroadcastResponse { int totalShards, int
successfulShards, int failedShards, - List shardFailures + List shardFailures, + ClusterState clusterState ) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; + Objects.requireNonNull(clusterState); + Objects.requireNonNull(shards); + Map indexHealthModifiableMap = new HashMap<>(); + Map indexStateModifiableMap = new HashMap<>(); + for (ShardStats shard : shards) { + Index index = shard.getShardRouting().index(); + IndexMetadata indexMetadata = clusterState.getMetadata().index(index); + if (indexMetadata != null) { + indexHealthModifiableMap.computeIfAbsent( + index.getName(), + ignored -> new ClusterIndexHealth(indexMetadata, clusterState.routingTable().index(index)).getStatus() + ); + indexStateModifiableMap.computeIfAbsent(index.getName(), ignored -> indexMetadata.getState()); + } + } + indexHealthMap = unmodifiableMap(indexHealthModifiableMap); + indexStateMap = unmodifiableMap(indexStateModifiableMap); } public Map asMap() { @@ -83,7 +119,7 @@ public Map getIndices() { Index index = shard.getShardRouting().index(); IndexStatsBuilder indexStatsBuilder = indexToIndexStatsBuilder.computeIfAbsent( index.getName(), - k -> new IndexStatsBuilder(k, index.getUUID()) + k -> new IndexStatsBuilder(k, index.getUUID(), indexHealthMap.get(index.getName()), indexStateMap.get(index.getName())) ); indexStatsBuilder.add(shard); } @@ -128,6 +164,10 @@ public CommonStats getPrimaries() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeArray(shards); + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + out.writeMap(indexHealthMap, StreamOutput::writeString, (o, s) -> s.writeTo(o)); + out.writeMap(indexStateMap, StreamOutput::writeString, (o, s) -> s.writeTo(o)); + } } @Override @@ -157,6 +197,12 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t for (IndexStats indexStats : getIndices().values()) { builder.startObject(indexStats.getIndex()); builder.field("uuid", indexStats.getUuid()); + if (indexStats.getHealth() != null) { + builder.field("health", indexStats.getHealth().toString().toLowerCase(Locale.ROOT)); + } + if (indexStats.getState() != null) { + builder.field("status", indexStats.getState().toString().toLowerCase(Locale.ROOT)); + } builder.startObject("primaries"); indexStats.getPrimaries().toXContent(builder, params); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 1f24c178f34b4..44bb62cd0f04e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -99,7 +99,8 @@ protected IndicesStatsResponse newResponse( totalShards, successfulShards, failedShards, - shardFailures + shardFailures, + clusterState ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 26f93916d758a..9f8ac48feb861 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -9,7 
+9,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder< DeleteIndexTemplateRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index ff5422f1ab2cb..8eb9d0b93e6b1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.template.get; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetIndexTemplatesRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index af0acab918323..55036ab63df3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.action.admin.indices.template.get; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -26,6 +24,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAction { @@ -73,9 +72,9 @@ protected void masterOperation( for (String name : request.names()) { if (Regex.isSimpleMatchPattern(name)) { - for (ObjectObjectCursor entry : state.metadata().templates()) { - if (Regex.simpleMatch(name, entry.key)) { - results.add(entry.value); + for (Map.Entry entry : state.metadata().templates().entrySet()) { + if (Regex.simpleMatch(name, entry.getKey())) { + results.add(entry.getValue()); } } } else if (state.metadata().templates().containsKey(name)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 37bbfb99270f0..b321cda3ab50a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -12,13 +12,13 @@ import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import 
org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -51,7 +51,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { resolvedTemplate = in.readOptionalWriteable(Template::new); if (in.readBoolean()) { int overlappingTemplatesCount = in.readInt(); - overlappingTemplates = new HashMap<>(overlappingTemplatesCount, 1L); + overlappingTemplates = Maps.newMapWithExpectedSize(overlappingTemplatesCount); for (int i = 0; i < overlappingTemplatesCount; i++) { String templateName = in.readString(); overlappingTemplates.put(templateName, in.readStringList()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 157e51cc5cc57..d72e28058c8f4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetadata; -import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -29,9 +28,12 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -58,7 +60,8 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final MetadataIndexTemplateService indexTemplateService; private final NamedXContentRegistry xContentRegistry; private final IndicesService indicesService; - private final AliasValidator aliasValidator; + private final SystemIndices systemIndices; + private final Set indexSettingProviders; @Inject public TransportSimulateIndexTemplateAction( @@ -69,7 +72,9 @@ public TransportSimulateIndexTemplateAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NamedXContentRegistry xContentRegistry, - IndicesService indicesService + IndicesService indicesService, + SystemIndices systemIndices, + IndexSettingProviders indexSettingProviders ) { super( SimulateIndexTemplateAction.NAME, @@ -85,7 +90,8 @@ public TransportSimulateIndexTemplateAction( this.indexTemplateService = indexTemplateService; this.xContentRegistry = xContentRegistry; this.indicesService = indicesService; - this.aliasValidator = new AliasValidator(); + this.systemIndices = systemIndices; + 
this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); } @Override @@ -131,7 +137,8 @@ protected void masterOperation( stateWithTemplate, xContentRegistry, indicesService, - aliasValidator + systemIndices, + indexSettingProviders ); final Map> overlapping = new HashMap<>(); @@ -182,23 +189,40 @@ public static Template resolveTemplate( final ClusterState simulatedState, final NamedXContentRegistry xContentRegistry, final IndicesService indicesService, - final AliasValidator aliasValidator + final SystemIndices systemIndices, + Set indexSettingProviders ) throws Exception { - Settings settings = resolveSettings(simulatedState.metadata(), matchingTemplate); + Settings templateSettings = resolveSettings(simulatedState.metadata(), matchingTemplate); List> resolvedAliases = MetadataIndexTemplateService.resolveAliases( simulatedState.metadata(), matchingTemplate ); + ComposableIndexTemplate template = simulatedState.metadata().templatesV2().get(matchingTemplate); // create the index with dummy settings in the cluster state so we can parse and validate the aliases - Settings dummySettings = Settings.builder() + Settings.Builder dummySettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(settings) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .build(); + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + + // First apply settings sourced from index settings providers + Settings.Builder additionalSettings = Settings.builder(); + for (var provider : indexSettingProviders) { + Settings result = provider.getAdditionalIndexSettings( + indexName, + template.getDataStreamTemplate() != null ? 
indexName : null, + simulatedState.getMetadata(), + System.currentTimeMillis(), + templateSettings + ); + dummySettings.put(result); + additionalSettings.put(result); + } + // Then apply settings resolved from templates: + dummySettings.put(templateSettings); + final IndexMetadata indexMetadata = IndexMetadata.builder(indexName).settings(dummySettings).build(); final ClusterState tempClusterState = ClusterState.builder(simulatedState) @@ -212,20 +236,20 @@ public static Template resolveTemplate( Set.of(), resolvedAliases, tempClusterState.metadata(), - aliasValidator, xContentRegistry, // the context is only used for validation so it's fine to pass fake values for the // shard id and the current timestamp tempIndexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()), - tempIndexService.dateMathExpressionResolverAt() + tempIndexService.dateMathExpressionResolverAt(), + systemIndices::isSystemName ) ); Map aliasesByName = aliases.stream().collect(Collectors.toMap(AliasMetadata::getAlias, Function.identity())); // empty request mapping as the user can't specify any explicit mappings via the simulate api - List> mappings = MetadataCreateIndexService.collectV2Mappings( - "{}", + List mappings = MetadataCreateIndexService.collectV2Mappings( + null, simulatedState, matchingTemplate, xContentRegistry, @@ -236,10 +260,8 @@ public static Template resolveTemplate( indexMetadata, tempIndexService -> { MapperService mapperService = tempIndexService.mapperService(); - for (Map mapping : mappings) { - if (mapping.isEmpty() == false) { - mapperService.merge(MapperService.SINGLE_MAPPING_NAME, mapping, MapperService.MergeReason.INDEX_TEMPLATE); - } + for (CompressedXContent mapping : mappings) { + mapperService.merge(MapperService.SINGLE_MAPPING_NAME, mapping, MapperService.MergeReason.INDEX_TEMPLATE); } DocumentMapper documentMapper = mapperService.documentMapper(); @@ -247,6 +269,7 @@ public static Template resolveTemplate( } ); + Settings settings = Settings.builder().put(templateSettings).put(additionalSettings.build()).build(); return new Template(settings, mergedMapping, aliasesByName); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index f02a5757ccc95..7c6bd58c292f4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -22,7 +21,10 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -32,6 +34,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; @@ -47,7 +50,8 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final MetadataIndexTemplateService indexTemplateService; private final NamedXContentRegistry xContentRegistry; private final IndicesService indicesService; - private AliasValidator aliasValidator; + private final SystemIndices systemIndices; + private final Set indexSettingProviders; @Inject public TransportSimulateTemplateAction( @@ -58,7 +62,9 @@ public TransportSimulateTemplateAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NamedXContentRegistry xContentRegistry, - IndicesService indicesService + IndicesService indicesService, + SystemIndices systemIndices, + IndexSettingProviders indexSettingProviders ) { super( SimulateTemplateAction.NAME, @@ -74,7 +80,8 @@ public TransportSimulateTemplateAction( this.indexTemplateService = indexTemplateService; this.xContentRegistry = xContentRegistry; this.indicesService = indicesService; - this.aliasValidator = new AliasValidator(); + this.systemIndices = systemIndices; + this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); } @Override @@ -152,7 +159,8 @@ protected void masterOperation( stateWithTemplate, xContentRegistry, indicesService, - aliasValidator + systemIndices, + indexSettingProviders ); listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 426fc642b56ff..17439f2312036 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index e6ceb67f34ed5..a0eb40d1749e9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import 
org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.index.query.QueryBuilder; public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder< diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 11f3147c93d82..b5a6089b0888a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -523,33 +523,23 @@ private void writeResponseType(StreamOutput out) throws IOException { private static DocWriteResponse readResponse(ShardId shardId, StreamInput in) throws IOException { int type = in.readByte(); - switch (type) { - case 0: - return new IndexResponse(shardId, in); - case 1: - return new DeleteResponse(shardId, in); - case 2: - return null; - case 3: - return new UpdateResponse(shardId, in); - default: - throw new IllegalArgumentException("Unexpected type [" + type + "]"); - } + return switch (type) { + case 0 -> new IndexResponse(shardId, in); + case 1 -> new DeleteResponse(shardId, in); + case 2 -> null; + case 3 -> new UpdateResponse(shardId, in); + default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); + }; } private static DocWriteResponse readResponse(StreamInput in) throws IOException { int type = in.readByte(); - switch (type) { - case 0: - return new IndexResponse(in); - case 1: - return new DeleteResponse(in); - case 2: - return null; - case 3: - return new UpdateResponse(in); - default: - throw new IllegalArgumentException("Unexpected type [" + type + "]"); - } + return switch (type) { + case 0 -> new IndexResponse(in); + case 1 -> new DeleteResponse(in); + case 2 -> null; + case 3 -> new UpdateResponse(in); + default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); + }; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index 254650b004139..144880796a3d3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -239,7 +239,7 @@ public void markOperationAsExecuted(Engine.Result result) { final BulkItemRequest current = getCurrentItem(); DocWriteRequest docWriteRequest = getRequestToExecute(); switch (result.getResultType()) { - case SUCCESS: + case SUCCESS -> { final DocWriteResponse response; if (result.getOperationType() == Engine.Operation.TYPE.INDEX) { Engine.IndexResult indexResult = (Engine.IndexResult) result; @@ -269,25 +269,22 @@ public void markOperationAsExecuted(Engine.Result result) { // set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though. executionResult.getResponse().setShardInfo(new ReplicationResponse.ShardInfo()); locationToSync = TransportWriteAction.locationToSync(locationToSync, result.getTranslogLocation()); - break; - case FAILURE: - executionResult = BulkItemResponse.failure( - current.id(), - docWriteRequest.opType(), - // Make sure to use request.index() here, if you - // use docWriteRequest.index() it will use the - // concrete index instead of an alias if used! 
- new BulkItemResponse.Failure( - request.index(), - docWriteRequest.id(), - result.getFailure(), - result.getSeqNo(), - result.getTerm() - ) - ); - break; - default: - throw new AssertionError("unknown result type for " + getCurrentItem() + ": " + result.getResultType()); + } + case FAILURE -> executionResult = BulkItemResponse.failure( + current.id(), + docWriteRequest.opType(), + // Make sure to use request.index() here, if you + // use docWriteRequest.index() it will use the + // concrete index instead of an alias if used! + new BulkItemResponse.Failure( + request.index(), + docWriteRequest.id(), + result.getFailure(), + result.getSeqNo(), + result.getTerm() + ) + ); + default -> throw new AssertionError("unknown result type for " + getCurrentItem() + ": " + result.getResultType()); } currentItemState = ItemProcessingState.EXECUTED; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 2f52535dc6ea9..7095a754b3f2c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -223,7 +223,7 @@ public static Builder builder(Client client, Listener listener, Scheduler flushS * @param listener The BulkProcessor listener that gets called on bulk events * @return the builder for BulkProcessor * @deprecated Use {@link #builder(BiConsumer, Listener, String)} - * with client::bulk as the first argument, or {@link #builder(org.elasticsearch.client.Client, + * with client::bulk as the first argument, or {@link #builder(org.elasticsearch.client.internal.Client, * org.elasticsearch.action.bulk.BulkProcessor.Listener, org.elasticsearch.threadpool.Scheduler, * org.elasticsearch.threadpool.Scheduler, java.lang.Runnable)} and manage the flush and retry schedulers explicitly */ diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 98760f0fb15a2..790637a80ef5e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -47,7 +47,7 @@ * and allows to executes it in a single batch. * * Note that we only support refresh on the bulk request not per item. 
- * @see org.elasticsearch.client.Client#bulk(BulkRequest) + * @see org.elasticsearch.client.internal.Client#bulk(BulkRequest) */ public class BulkRequest extends ActionRequest implements diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 7ce3cc23800bb..fe0488962c8f0 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -18,7 +18,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 258d91c712ba7..ae3c53ca5b8c8 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContentObject; @@ -19,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -124,7 +124,7 @@ public BulkItemResponse[] getItems() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 851a45af20ec3..ee40476e94b79 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -31,7 +31,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -77,7 +77,6 @@ import java.util.function.LongSupplier; import java.util.stream.Collectors; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.EXCLUDED_DATA_STREAMS_KEY; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -163,8 +162,7 @@ public static IndexRequest getIndexWriteRequest(DocWriteRequest docWriteReque IndexRequest indexRequest = null; if (docWriteRequest instanceof IndexRequest) { indexRequest = (IndexRequest) docWriteRequest; - } else if (docWriteRequest instanceof UpdateRequest) { - UpdateRequest updateRequest = 
(UpdateRequest) docWriteRequest; + } else if (docWriteRequest instanceof UpdateRequest updateRequest) { indexRequest = updateRequest.docAsUpsert() ? updateRequest.doc() : updateRequest.upsertRequest(); } return indexRequest; @@ -216,8 +214,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec hasIndexRequestsWithPipelines |= indexRequestHasPipeline; } - if (actionRequest instanceof IndexRequest) { - IndexRequest ir = (IndexRequest) actionRequest; + if (actionRequest instanceof IndexRequest ir) { ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion); if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally"); @@ -348,6 +345,11 @@ public void onRejection(Exception rejectedException) { } static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { + DocWriteRequest.OpType opType = writeRequest.opType(); + if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { + // op type not create or index, then bail early + return; + } IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(writeRequest.index()); if (indexAbstraction == null) { return; @@ -365,7 +367,6 @@ static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest // so checking if write op is append-only and if so fail. // (Updates and deletes are allowed to target a backing index) - DocWriteRequest.OpType opType = writeRequest.opType(); // CREATE op_type is considered append-only and // INDEX op_type is considered append-only when no if_primary_term and if_seq_no is specified. // (the latter maybe an update, but at this stage we can't determine that. In order to determine @@ -507,30 +508,36 @@ protected void doRun() { if (addFailureIfRequiresAliasAndAliasIsMissing(docWriteRequest, i, metadata)) { continue; } - if (addFailureIfIndexIsUnavailable(docWriteRequest, i, concreteIndices, metadata)) { + if (addFailureIfIndexCannotBeCreated(docWriteRequest, i)) { continue; } - Index concreteIndex = concreteIndices.resolveIfAbsent(docWriteRequest); + IndexAbstraction ia = null; + boolean includeDataStreams = docWriteRequest.opType() == DocWriteRequest.OpType.CREATE; try { + ia = concreteIndices.resolveIfAbsent(docWriteRequest); + if (ia.isDataStreamRelated() && includeDataStreams == false) { + throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); + } // The ConcreteIndices#resolveIfAbsent(...) method validates via IndexNameExpressionResolver whether // an operation is allowed in index into a data stream, but this isn't done when resolve call is cached, so // the validation needs to be performed here too. 
- IndexAbstraction indexAbstraction = clusterState.getMetadata().getIndicesLookup().get(concreteIndex.getName()); - if (indexAbstraction.getParentDataStream() != null && + if (ia.getParentDataStream() != null && // avoid valid cases when directly indexing into a backing index // (for example when directly indexing into .ds-logs-foobar-000001) - concreteIndex.getName().equals(docWriteRequest.index()) == false + ia.getName().equals(docWriteRequest.index()) == false && docWriteRequest.opType() != DocWriteRequest.OpType.CREATE) { throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); } - if (docWriteRequest.opType() == OpType.CREATE || docWriteRequest.opType() == OpType.INDEX) { - prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); - prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); - } + prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); + prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); docWriteRequest.process(); + final Index concreteIndex = docWriteRequest.getConcreteWriteIndex(ia, metadata); + if (addFailureIfIndexIsClosed(docWriteRequest, concreteIndex, i, metadata)) { + continue; + } IndexRouting indexRouting = concreteIndices.routing(concreteIndex); int shardId = docWriteRequest.route(indexRouting); List shardRequests = requestsByShard.computeIfAbsent( @@ -538,8 +545,9 @@ protected void doRun() { shard -> new ArrayList<>() ); shardRequests.add(new BulkItemRequest(i, docWriteRequest)); - } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.id(), e); + } catch (ElasticsearchParseException | IllegalArgumentException | IndexNotFoundException | RoutingMissingException e) { + String name = ia != null ? 
ia.getName() : docWriteRequest.index(); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(name, docWriteRequest.id(), e); BulkItemResponse bulkItemResponse = BulkItemResponse.failure(i, docWriteRequest.opType(), failure); responses.set(i, bulkItemResponse); // make sure the request gets never processed again @@ -589,7 +597,7 @@ public void onResponse(BulkShardResponse bulkShardResponse) { public void onFailure(Exception e) { // create failures for all relevant requests for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + final String indexName = request.index(); DocWriteRequest docWriteRequest = request.request(); BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e); responses.set(request.id(), BulkItemResponse.failure(request.id(), docWriteRequest.opType(), failure)); @@ -676,26 +684,7 @@ private boolean addFailureIfRequiresAliasAndAliasIsMissing(DocWriteRequest re return false; } - private boolean addFailureIfIndexIsUnavailable( - DocWriteRequest request, - int idx, - final ConcreteIndices concreteIndices, - final Metadata metadata - ) { - IndexNotFoundException cannotCreate = indicesThatCannotBeCreated.get(request.index()); - if (cannotCreate != null) { - addFailure(request, idx, cannotCreate); - return true; - } - Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); - if (concreteIndex == null) { - try { - concreteIndex = concreteIndices.resolveIfAbsent(request); - } catch (IndexClosedException | IndexNotFoundException | IllegalArgumentException ex) { - addFailure(request, idx, ex); - return true; - } - } + private boolean addFailureIfIndexIsClosed(DocWriteRequest request, Index concreteIndex, int idx, final Metadata metadata) { IndexMetadata indexMetadata = metadata.getIndexSafe(concreteIndex); if (indexMetadata.getState() == IndexMetadata.State.CLOSE) { addFailure(request, idx, new IndexClosedException(concreteIndex)); @@ -704,6 +693,15 @@ private boolean addFailureIfIndexIsUnavailable( return false; } + private boolean addFailureIfIndexCannotBeCreated(DocWriteRequest request, int idx) { + IndexNotFoundException cannotCreate = indicesThatCannotBeCreated.get(request.index()); + if (cannotCreate != null) { + addFailure(request, idx, cannotCreate); + return true; + } + return false; + } + private void addFailure(DocWriteRequest request, int idx, Exception unavailableException) { BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), unavailableException); BulkItemResponse bulkItemResponse = BulkItemResponse.failure(idx, request.opType(), failure); @@ -728,7 +726,7 @@ void executeBulk( private static class ConcreteIndices { private final ClusterState state; private final IndexNameExpressionResolver indexNameExpressionResolver; - private final Map indices = new HashMap<>(); + private final Map indexAbstractions = new HashMap<>(); private final Map routings = new HashMap<>(); ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) { @@ -736,32 +734,11 @@ private static class ConcreteIndices { this.indexNameExpressionResolver = indexNameExpressionResolver; } - Index getConcreteIndex(String indexOrAlias) { - return indices.get(indexOrAlias); - } - - Index resolveIfAbsent(DocWriteRequest request) { - Index concreteIndex = indices.get(request.index()); - if (concreteIndex == null) { - boolean includeDataStreams = request.opType() == 
DocWriteRequest.OpType.CREATE; - try { - concreteIndex = indexNameExpressionResolver.concreteWriteIndex( - state, - request.indicesOptions(), - request.indices()[0], - false, - includeDataStreams - ); - } catch (IndexNotFoundException e) { - if (includeDataStreams == false && e.getMetadataKeys().contains(EXCLUDED_DATA_STREAMS_KEY)) { - throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); - } else { - throw e; - } - } - indices.put(request.index(), concreteIndex); - } - return concreteIndex; + IndexAbstraction resolveIfAbsent(DocWriteRequest request) { + return indexAbstractions.computeIfAbsent( + request.index(), + key -> indexNameExpressionResolver.resolveWriteIndexAbstraction(state, request) + ); } IndexRouting routing(Index index) { @@ -802,27 +779,27 @@ private void processBulkIndexIngestRequest( // (this will happen if pre-processing all items in the bulk failed) actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0)); } else { + ActionRunnable runnable = new ActionRunnable<>(actionListener) { + @Override + protected void doRun() { + doInternalExecute(task, bulkRequest, executorName, actionListener); + } + + @Override + public boolean isForceExecution() { + // If we fork back to a write thread we should **not** fail, because tp queue is full. + // (Otherwise the work done during ingest will be lost) + // It is okay to force execution here. Throttling of write requests happens prior to + // ingest when a node receives a bulk request. + return true; + } + }; // If a processor went async and returned a response on a different thread then // before we continue the bulk request we should fork back on a write thread: if (originalThread == Thread.currentThread()) { - assert Thread.currentThread().getName().contains(executorName); - doInternalExecute(task, bulkRequest, executorName, actionListener); + threadPool.executor(Names.SAME).execute(runnable); } else { - threadPool.executor(executorName).execute(new ActionRunnable<>(actionListener) { - @Override - protected void doRun() { - doInternalExecute(task, bulkRequest, executorName, actionListener); - } - - @Override - public boolean isForceExecution() { - // If we fork back to a write thread we **not** should fail, because tp queue is full. - // (Otherwise the work done during ingest will be lost) - // It is okay to force execution here. Throttling of write requests happens prior to - // ingest when a node receives a bulk request. - return true; - } - }); + threadPool.executor(executorName).execute(runnable); } } }
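The TransportBulkAction hunk above hoists a duplicated anonymous ActionRunnable into one shared local so the same-thread and forked paths execute identical code, while isForceExecution() stays true so work already done during ingest is not dropped when the write queue is full. A simplified sketch of that shape, using plain java.util.concurrent stand-ins rather than the Elasticsearch classes:

// ForkBackSketch.java - illustrative sketch only, not part of the patch;
// Work and isForceExecution are stand-ins for the ES types, not their real API.
import java.util.concurrent.Executor;

public class ForkBackSketch {
    interface Work extends Runnable {
        // Mirrors the force-execution contract: run even if the pool queue is full.
        default boolean isForceExecution() {
            return false;
        }
    }

    static void continueAfterIngest(Thread originalThread, Executor sameThreadExecutor, Executor writeExecutor, Runnable body) {
        // One shared runnable instead of duplicating the anonymous class per branch.
        Work work = new Work() {
            @Override
            public void run() {
                body.run();
            }

            @Override
            public boolean isForceExecution() {
                return true; // losing the ingest work would be worse than jumping the queue
            }
        };
        if (originalThread == Thread.currentThread()) {
            sameThreadExecutor.execute(work); // still on the original thread: run inline
        } else {
            writeExecutor.execute(work); // a processor went async: fork back to a write thread
        }
    }
}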
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 7a095b5281a33..cf15b273cb0b5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -568,8 +568,7 @@ private static Engine.Result performOpOnReplica( ) throws Exception { final Engine.Result result; switch (docWriteRequest.opType()) { - case CREATE: - case INDEX: + case CREATE, INDEX -> { final IndexRequest indexRequest = (IndexRequest) docWriteRequest; final SourceToParse sourceToParse = new SourceToParse( indexRequest.id(), @@ -586,8 +585,8 @@ private static Engine.Result performOpOnReplica( indexRequest.isRetry(), sourceToParse ); - break; - case DELETE: + } + case DELETE -> { DeleteRequest deleteRequest = (DeleteRequest) docWriteRequest; result = replica.applyDeleteOperationOnReplica( primaryResponse.getSeqNo(), @@ -595,10 +594,11 @@ private static Engine.Result performOpOnReplica( primaryResponse.getVersion(), deleteRequest.id() ); - break; - default: + } + default -> { assert false : "Unexpected request operation type on replica: " + docWriteRequest + ";primary result: " + primaryResponse; throw new IllegalStateException("Unexpected request operation type on replica: " + docWriteRequest.opType().getLowercase()); + } } if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { // Even though the primary waits on all nodes to ack the mapping changes to the master diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 7a48eb90ab722..1053eddced6c0 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -32,14 +32,14 @@ /** * A request to delete a document from an index based on its type and id. Best created using - * {@link org.elasticsearch.client.Requests#deleteRequest(String)}. + * {@link org.elasticsearch.client.internal.Requests#deleteRequest(String)}. * <p>
    * The operation requires the {@link #index()} and {@link #id(String)} to * be set. * * @see DeleteResponse - * @see org.elasticsearch.client.Client#delete(DeleteRequest) - * @see org.elasticsearch.client.Requests#deleteRequest(String) + * @see org.elasticsearch.client.internal.Client#delete(DeleteRequest) + * @see org.elasticsearch.client.internal.Requests#deleteRequest(String) */ public class DeleteRequest extends ReplicatedWriteRequest implements diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index a68790eee36ce..5cfdd2b796b14 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.VersionType; diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index 3d5f53faff7fe..9fd9c5fcd791f 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -22,7 +22,7 @@ * The response of the delete action. * * @see org.elasticsearch.action.delete.DeleteRequest - * @see org.elasticsearch.client.Client#delete(DeleteRequest) + * @see org.elasticsearch.client.internal.Client#delete(DeleteRequest) */ public class DeleteResponse extends DocWriteResponse { diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java index 591db097aafed..f73972c2b7356 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.explain; import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index a6f4334a63fb1..3ee3f88aad75e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -54,7 +54,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { private static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices"); private static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices"); private static final ParseField NON_DIMENSION_INDICES_FIELD = new ParseField("non_dimension_indices"); - private static final ParseField METRIC_CONFLICTS_INDICES_FIELD = new 
ParseField("mertric_conflicts_indices"); + private static final ParseField METRIC_CONFLICTS_INDICES_FIELD = new ParseField("metric_conflicts_indices"); private static final ParseField META_FIELD = new ParseField("meta"); private final String name; @@ -480,7 +480,7 @@ static class Builder { private boolean isAggregatable; private boolean isDimension; private TimeSeriesParams.MetricType metricType; - private boolean mertricTypeIsSet; + private boolean metricTypeIsSet; private List indiceList; private Map> meta; @@ -491,7 +491,7 @@ static class Builder { this.isAggregatable = true; this.isDimension = true; this.metricType = null; - this.mertricTypeIsSet = false; + this.metricTypeIsSet = false; this.indiceList = new ArrayList<>(); this.meta = new HashMap<>(); } @@ -516,12 +516,12 @@ void add( this.isDimension &= isDimension; // If we have discrepancy in metric types or in some indices this field is not marked as a metric field - we will // treat is a non-metric field and report this discrepancy in metricConflictsIndices - if (this.mertricTypeIsSet) { + if (this.metricTypeIsSet) { if (this.metricType != metricType) { this.metricType = null; } } else { - this.mertricTypeIsSet = true; + this.metricTypeIsSet = true; this.metricType = metricType; } for (Map.Entry entry : meta.entrySet()) { @@ -609,19 +609,11 @@ FieldCapabilities build(boolean withIndices) { } } - private static class IndexCaps { - final String name; - final boolean isSearchable; - final boolean isAggregatable; - final boolean isDimension; - final TimeSeriesParams.MetricType metricType; - - IndexCaps(String name, boolean isSearchable, boolean isAggregatable, boolean isDimension, TimeSeriesParams.MetricType metricType) { - this.name = name; - this.isSearchable = isSearchable; - this.isAggregatable = isAggregatable; - this.isDimension = isDimension; - this.metricType = metricType; - } - } + private record IndexCaps( + String name, + boolean isSearchable, + boolean isAggregatable, + boolean isDimension, + TimeSeriesParams.MetricType metricType + ) {} } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java index eaeb1b48a9378..16f94e6c62c46 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.index.query.QueryBuilder; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 1713b4f45874c..7429ec5e8b50a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index e37c4307695e3..e404f1f6e350a 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -27,14 +27,14 @@ /** * A request to get a document (its source) from an index based on its id. Best created using - * {@link org.elasticsearch.client.Requests#getRequest(String)}. + * {@link org.elasticsearch.client.internal.Requests#getRequest(String)}. *

    * The operation requires the {@link #index()} and {@link #id(String)} * to be set. * * @see org.elasticsearch.action.get.GetResponse - * @see org.elasticsearch.client.Requests#getRequest(String) - * @see org.elasticsearch.client.Client#get(GetRequest) + * @see org.elasticsearch.client.internal.Requests#getRequest(String) + * @see org.elasticsearch.client.internal.Client#get(GetRequest) */ // It's not possible to suppress the warning at #realtime(boolean) at a method-level. @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java index fce83df830c86..af0b56710fd59 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.VersionType; diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index 40694b3cc141b..7af64bed9f3cb 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -31,7 +31,7 @@ * The response of a get action. * * @see GetRequest - * @see org.elasticsearch.client.Client#get(GetRequest) + * @see org.elasticsearch.client.internal.Client#get(GetRequest) */ public class GetResponse extends ActionResponse implements Iterable, ToXContentObject { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java index 398e994fe7ebd..08371a054c6e4 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * A multi get document action request builder.
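A recurring refactor in this patch replaces hand-written immutable holder classes (IndexCaps above; Parsed, MergeResult, SendingTarget, ReducedQueryPhase, SortedTopDocs, SearchShard, SearchTimeProvider and others below) with Java 16 records, which derive the canonical constructor, accessors, equals, hashCode, and toString from the component list. A minimal sketch of the before/after shape, using a hypothetical Point holder rather than any class from the patch:

import java.util.Objects;

// Before: constructor, accessors, equals and hashCode all written by hand.
final class PointClass {
    private final String name;
    private final int value;

    PointClass(String name, int value) {
        this.name = name;
        this.value = value;
    }

    String name() {
        return name;
    }

    int value() {
        return value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        PointClass that = (PointClass) o;
        return value == that.value && Objects.equals(name, that.name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, value);
    }
}

// After: the record declares the same API in one line; a compact
// constructor can still carry validation, as the patch does for
// ReducedQueryPhase and ShardIdAndClusterAlias.
record PointRecord(String name, int value) {
    PointRecord {
        Objects.requireNonNull(name, "name must not be null");
    }
}

The generated accessors drop the get/is prefix, which is why call sites change alongside the conversions: SimulateExecutionService below reads request.documents() and request.verbose() instead of getDocuments() and isVerbose(), and TransportSearchAction reads timeProvider.absoluteStartMillis().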
diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 57158194416b9..eb979bc578554 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -130,7 +130,7 @@ public MultiGetItemResponse[] getResponses() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index da5748367fbd2..c584a89e19555 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index b9ac0e9dec2a5..0f3b7d755976c 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -17,7 +17,9 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.internal.Requests; +import org.elasticsearch.cluster.metadata.IndexAbstraction; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; @@ -28,6 +30,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; @@ -47,7 +50,7 @@ /** * Index request to index a typed JSON document into a specific index and make it searchable. Best - * created using {@link org.elasticsearch.client.Requests#indexRequest(String)}. + * created using {@link org.elasticsearch.client.internal.Requests#indexRequest(String)}. 
* * The index requires the {@link #index()}, {@link #id(String)} and * {@link #source(byte[], XContentType)} to be set. @@ -59,8 +62,8 @@ * If the {@link #id(String)} is not set, it will be automatically generated. * * @see IndexResponse - * @see org.elasticsearch.client.Requests#indexRequest(String) - * @see org.elasticsearch.client.Client#index(IndexRequest) + * @see org.elasticsearch.client.internal.Requests#indexRequest(String) + * @see org.elasticsearch.client.internal.Client#index(IndexRequest) */ public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { @@ -718,6 +721,11 @@ public boolean isRequireAlias() { return requireAlias; } + @Override + public Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { + return ia.getWriteIndex(this, metadata); + } + @Override public int route(IndexRouting indexRouting) { assert id != null : "route must be called after process"; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 2e49cb015a145..3a190c4e314c0 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.VersionType; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 666cee6adaf81..fe631f53c975c 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -23,7 +23,7 @@ * A response of an index operation, * * @see org.elasticsearch.action.index.IndexRequest - * @see org.elasticsearch.client.Client#index(IndexRequest) + * @see org.elasticsearch.client.internal.Client#index(IndexRequest) */ public class IndexResponse extends DocWriteResponse { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java index f177df1260dad..32e1154a8af0a 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class DeletePipelineRequestBuilder extends ActionRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java index 375f661ecb9f8..9d11fddc5f92b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; public class GetPipelineRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetPipelineRequest, diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java index 55d87a0a492fc..613710e87929c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java @@ -124,8 +124,7 @@ public static GetPipelineResponse fromXContent(XContentParser parser) throws IOE public boolean equals(Object other) { if (other == null) { return false; - } else if (other instanceof GetPipelineResponse) { - GetPipelineResponse otherResponse = (GetPipelineResponse) other; + } else if (other instanceof GetPipelineResponse otherResponse) { if (pipelines == null) { return otherResponse.pipelines == null; } else { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java index 9fc44570bef3f..0a68e13a24465 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index df03395b691b9..ff48d73752b7f 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; -import org.elasticsearch.client.OriginSettingClient; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java index 6916fcb169914..fca5a2ea43b42 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java @@ -67,23 +67,23 @@ public void execute(SimulatePipelineRequest.Parsed request, 
ActionListener { final AtomicInteger counter = new AtomicInteger(); final List responses = new CopyOnWriteArrayList<>( - new SimulateDocumentBaseResult[request.getDocuments().size()] + new SimulateDocumentBaseResult[request.documents().size()] ); - if (request.getDocuments().isEmpty()) { - l.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), request.isVerbose(), responses)); + if (request.documents().isEmpty()) { + l.onResponse(new SimulatePipelineResponse(request.pipeline().getId(), request.verbose(), responses)); return; } int iter = 0; - for (IngestDocument ingestDocument : request.getDocuments()) { + for (IngestDocument ingestDocument : request.documents()) { final int index = iter; - executeDocument(request.getPipeline(), ingestDocument, request.isVerbose(), (response, e) -> { + executeDocument(request.pipeline(), ingestDocument, request.verbose(), (response, e) -> { if (response != null) { responses.set(index, response); } - if (counter.incrementAndGet() == request.getDocuments().size()) { - l.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), request.isVerbose(), responses)); + if (counter.incrementAndGet() == request.documents().size()) { + l.onResponse(new SimulatePipelineResponse(request.pipeline().getId(), request.verbose(), responses)); } }); iter++; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 89db6cd5765ea..081175819f5d2 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -115,28 +115,12 @@ public static final class Fields { static final String SOURCE = "_source"; } - static class Parsed { - private final List documents; - private final Pipeline pipeline; - private final boolean verbose; - + record Parsed(Pipeline pipeline, List documents, boolean verbose) { Parsed(Pipeline pipeline, List documents, boolean verbose) { this.pipeline = pipeline; this.documents = Collections.unmodifiableList(documents); this.verbose = verbose; } - - public Pipeline getPipeline() { - return pipeline; - } - - public List getDocuments() { - return documents; - } - - public boolean isVerbose() { - return verbose; - } } static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java index 0b613035450c1..92ee01f552da4 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index e1b47caac2d08..a5dc7dda64134 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -69,17 +69,11 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte } } else if (token.equals(Token.START_OBJECT)) { switch (fieldName) { - case WriteableIngestDocument.DOC_FIELD: - result = new SimulateDocumentBaseResult( - WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() - ); - break; - case "error": - result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); - break; - default: - parser.skipChildren(); - break; + case WriteableIngestDocument.DOC_FIELD -> result = new SimulateDocumentBaseResult( + WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() + ); + case "error" -> result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); + default -> parser.skipChildren(); } } // else it is a value skip it } diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index 3cbf5a3875427..819daed701ced 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -86,7 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("cluster_name", clusterName.value()); builder.field("cluster_uuid", clusterUuid); builder.startObject("version") - .field("number", build.getQualifiedVersion()) + .field("number", build.qualifiedVersion()) .field("build_flavor", build.flavor().displayName()) .field("build_type", build.type().displayName()) .field("build_hash", build.hash()) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 6614b252dfefa..cb54bff37aead 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -195,7 +195,7 @@ public final void start() { boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED; listener.onResponse( new SearchResponse( - InternalSearchResponse.empty(withTotalHits), + withTotalHits ? 
InternalSearchResponse.EMPTY_WITH_TOTAL_HITS : InternalSearchResponse.EMPTY_WITHOUT_TOTAL_HITS, null, 0, 0, @@ -795,7 +795,7 @@ public final ShardSearchRequest buildShardSearchRequest(SearchShardIterator shar getNumShards(), filter, indexBoost, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), shardIt.getClusterAlias(), shardIt.getSearchContextId(), shardIt.getSearchContextKeepAlive() diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 543aee94866e3..3c3cbfdce63b0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -162,7 +162,7 @@ private void runCoordinatorRewritePhase() { searchShardIterator.getOriginalIndices().indicesOptions(), Collections.emptyList(), getNumShards(), - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), searchShardIterator.getClusterAlias() ); final ShardSearchRequest request = canMatchNodeRequest.createShardSearchRequest(buildShardLevelRequest(searchShardIterator)); @@ -350,30 +350,7 @@ public void onFailure(Exception e) { } } - private static class SendingTarget { - @Nullable - private final String clusterAlias; - @Nullable - private final String nodeId; - - SendingTarget(@Nullable String clusterAlias, @Nullable String nodeId) { - this.clusterAlias = clusterAlias; - this.nodeId = nodeId; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SendingTarget that = (SendingTarget) o; - return Objects.equals(clusterAlias, that.clusterAlias) && Objects.equals(nodeId, that.nodeId); - } - - @Override - public int hashCode() { - return Objects.hash(clusterAlias, nodeId); - } - } + private record SendingTarget(@Nullable String clusterAlias, @Nullable String nodeId) {} private CanMatchNodeRequest createCanMatchRequest(Map.Entry> entry) { final SearchShardIterator first = entry.getValue().get(0); @@ -391,7 +368,7 @@ private CanMatchNodeRequest createCanMatchRequest(Map.Entry(innerHitBuilders.size())); + hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); } hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 743530bfb4c48..9597da9a7ce1d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -123,7 +123,7 @@ private void innerRun() throws Exception { // query AND fetch optimization finishPhase.run(); } else { - ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; + ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); // no docs to fetch -- sidestep everything and return if (scoreDocs.length == 0) { diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index a34744ccf4a4a..085f17ef6684c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -354,7 +354,7 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild xContentBuilder.field("index", request.indices()); } if (request.indicesOptions().equals(SearchRequest.DEFAULT_INDICES_OPTIONS) == false) { - WildcardStates.toXContent(request.indicesOptions().getExpandWildcards(), xContentBuilder); + WildcardStates.toXContent(request.indicesOptions().expandWildcards(), xContentBuilder); xContentBuilder.field("ignore_unavailable", request.indicesOptions().ignoreUnavailable()); xContentBuilder.field("allow_no_indices", request.indicesOptions().allowNoIndices()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index c382833166dcc..6f1e8d429edab 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; /** * A request builder for multiple search requests. diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index c40e622468759..041fdfeca76eb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -25,7 +26,6 @@ import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -126,7 +126,7 @@ public MultiSearchResponse(Item[] items, long tookInMillis) { @Override public Iterator iterator() { - return Arrays.stream(items).iterator(); + return Iterators.forArray(items); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 739ee17851e6d..a5092498e67a1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -21,7 +21,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContextBuilder; +import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.query.QuerySearchResult; @@ -57,7 +57,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults consumeAggs() { } } - private static 
class MergeResult { - private final List processedShards; - private final TopDocs reducedTopDocs; - private final InternalAggregations reducedAggs; - private final long estimatedSize; - - private MergeResult( - List processedShards, - TopDocs reducedTopDocs, - InternalAggregations reducedAggs, - long estimatedSize - ) { - this.processedShards = processedShards; - this.reducedTopDocs = reducedTopDocs; - this.reducedAggs = reducedAggs; - this.estimatedSize = estimatedSize; - } - } + private record MergeResult( + List processedShards, + TopDocs reducedTopDocs, + InternalAggregations reducedAggs, + long estimatedSize + ) {} private static class MergeTask { private final List emptyResults; diff --git a/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java index 3aa578598b7f0..09a497747fc4b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index b771d3c3f32cc..f646d80dc354c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index edf05a209e613..59691bb8c78a0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -25,14 +25,14 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -64,10 +64,10 @@ public final class 
SearchPhaseController { private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; - private final BiFunction, SearchRequest, InternalAggregation.ReduceContextBuilder> requestToAggReduceContextBuilder; + private final BiFunction, SearchRequest, AggregationReduceContext.Builder> requestToAggReduceContextBuilder; public SearchPhaseController( - BiFunction, SearchRequest, InternalAggregation.ReduceContextBuilder> requestToAggReduceContextBuilder + BiFunction, SearchRequest, AggregationReduceContext.Builder> requestToAggReduceContextBuilder ) { this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder; } @@ -179,12 +179,10 @@ static SortedTopDocs sortDocs( SortField[] sortFields = null; String groupField = null; Object[] groupValues = null; - if (mergedTopDocs instanceof TopFieldDocs) { - TopFieldDocs fieldDocs = (TopFieldDocs) mergedTopDocs; + if (mergedTopDocs instanceof TopFieldDocs fieldDocs) { sortFields = fieldDocs.fields; - if (fieldDocs instanceof TopFieldGroups) { + if (fieldDocs instanceof TopFieldGroups topFieldGroups) { isSortedByField = (fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) == false; - TopFieldGroups topFieldGroups = (TopFieldGroups) fieldDocs; groupField = topFieldGroups.field; groupValues = topFieldGroups.groupValues; } else { @@ -203,13 +201,11 @@ static TopDocs mergeTopDocs(Collection results, int topN, int from) { final int numShards = results.size(); if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them. return topDocs; - } else if (topDocs instanceof TopFieldGroups) { - TopFieldGroups firstTopDocs = (TopFieldGroups) topDocs; + } else if (topDocs instanceof TopFieldGroups firstTopDocs) { final Sort sort = new Sort(firstTopDocs.fields); final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[numShards]); mergedTopDocs = TopFieldGroups.merge(sort, from, topN, shardTopDocs, false); - } else if (topDocs instanceof TopFieldDocs) { - TopFieldDocs firstTopDocs = (TopFieldDocs) topDocs; + } else if (topDocs instanceof TopFieldDocs firstTopDocs) { final Sort sort = new Sort(firstTopDocs.fields); final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]); mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs); @@ -272,7 +268,7 @@ public InternalSearchResponse merge( IntFunction resultsLookup ) { if (reducedQueryPhase.isEmptyResult) { - return InternalSearchResponse.empty(); + return InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResults, resultsLookup); @@ -380,14 +376,14 @@ private SearchHits getHits( * @param queryResults a list of non-null query shard results */ ReducedQueryPhase reducedScrollQueryPhase(Collection queryResults) { - InternalAggregation.ReduceContextBuilder aggReduceContextBuilder = new InternalAggregation.ReduceContextBuilder() { + AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() { @Override - public ReduceContext forPartialReduction() { + public AggregationReduceContext forPartialReduction() { throw new UnsupportedOperationException("Scroll requests don't have aggs"); } @Override - public ReduceContext forFinalReduction() { + public AggregationReduceContext forFinalReduction() { throw new UnsupportedOperationException("Scroll requests don't have aggs"); } }; @@ -423,7 +419,7 @@ ReducedQueryPhase reducedQueryPhase( 
TopDocsStats topDocsStats, int numReducePhases, boolean isScrollRequest, - InternalAggregation.ReduceContextBuilder aggReduceContextBuilder, + AggregationReduceContext.Builder aggReduceContextBuilder, boolean performFinalReduce ) { assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; @@ -461,7 +457,7 @@ ReducedQueryPhase reducedQueryPhase( // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) final Map>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map profileShardResults = hasProfileResults - ? new HashMap<>(queryResults.size()) + ? Maps.newMapWithExpectedSize(queryResults.size()) : Collections.emptyMap(); int from = 0; int size = 0; @@ -480,8 +476,7 @@ ReducedQueryPhase reducedQueryPhase( for (Suggestion> suggestion : result.suggest()) { List> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); suggestionList.add(suggestion); - if (suggestion instanceof CompletionSuggestion) { - CompletionSuggestion completionSuggestion = (CompletionSuggestion) suggestion; + if (suggestion instanceof CompletionSuggestion completionSuggestion) { completionSuggestion.setShardIndex(result.getShardIndex()); } } @@ -528,7 +523,7 @@ ReducedQueryPhase reducedQueryPhase( } private static InternalAggregations reduceAggs( - InternalAggregation.ReduceContextBuilder aggReduceContextBuilder, + AggregationReduceContext.Builder aggReduceContextBuilder, boolean performFinalReduce, List toReduce ) { @@ -585,69 +580,41 @@ static int getTopDocsSize(SearchRequest request) { : source.from()); } - public static final class ReducedQueryPhase { + public record ReducedQueryPhase( // the sum of all hits across all reduced shards - final TotalHits totalHits; + TotalHits totalHits, // the number of returned hits (doc IDs) across all reduced shards - final long fetchHits; + long fetchHits, // the max score across all reduced hits or {@link Float#NaN} if no hits returned - final float maxScore; + float maxScore, // true if at least one reduced result timed out - final boolean timedOut; + boolean timedOut, // non null and true if at least one reduced result was terminated early - final Boolean terminatedEarly; + Boolean terminatedEarly, // the reduced suggest results - final Suggest suggest; + Suggest suggest, // the reduced internal aggregations - final InternalAggregations aggregations; + InternalAggregations aggregations, // the reduced profile results - final SearchProfileResultsBuilder profileBuilder; - // the number of reduce phases - final int numReducePhases; + SearchProfileResultsBuilder profileBuilder, // encloses info about the merged top docs, the sort fields used to sort the score docs etc. - final SortedTopDocs sortedTopDocs; + SortedTopDocs sortedTopDocs, + // sort value formats used to sort / format the result + DocValueFormat[] sortValueFormats, + // the number of reduce phases + int numReducePhases, // the size of the top hits to return - final int size; - // true iff the query phase had no results.
Otherwise false - final boolean isEmptyResult; + int size, // the offset into the merged top hits - final int from; - // sort value formats used to sort / format the result - final DocValueFormat[] sortValueFormats; - - ReducedQueryPhase( - TotalHits totalHits, - long fetchHits, - float maxScore, - boolean timedOut, - Boolean terminatedEarly, - Suggest suggest, - InternalAggregations aggregations, - SearchProfileResultsBuilder profileBuilder, - SortedTopDocs sortedTopDocs, - DocValueFormat[] sortValueFormats, - int numReducePhases, - int size, - int from, - boolean isEmptyResult - ) { + int from, + // true iff the query phase had no results. Otherwise false + boolean isEmptyResult + ) { + + public ReducedQueryPhase { if (numReducePhases <= 0) { throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases); } - this.totalHits = totalHits; - this.fetchHits = fetchHits; - this.maxScore = maxScore; - this.timedOut = timedOut; - this.terminatedEarly = terminatedEarly; - this.suggest = suggest; - this.aggregations = aggregations; - this.profileBuilder = profileBuilder; - this.numReducePhases = numReducePhases; - this.sortedTopDocs = sortedTopDocs; - this.size = size; - this.from = from; - this.isEmptyResult = isEmptyResult; - this.sortValueFormats = sortValueFormats; } /** @@ -679,7 +646,7 @@ private SearchProfileResults buildSearchProfileResults(Collection isCanceled, SearchRequest request) { + AggregationReduceContext.Builder getReduceContext(Supplier isCanceled, SearchRequest request) { return requestToAggReduceContextBuilder.apply(isCanceled, request); } @@ -771,32 +738,17 @@ void add(TopDocsAndMaxScore topDocs, boolean timedOut, Boolean terminatedEarly) } } - static final class SortedTopDocs { - static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null, null, null, 0); + record SortedTopDocs( // the searches merged top docs - final ScoreDoc[] scoreDocs; + ScoreDoc[] scoreDocs, // true iff the result score docs is sorted by a field (not score), this implies that sortField is set. - final boolean isSortedByField; + boolean isSortedByField, // the top docs sort fields used to sort the score docs, null if the results are not sorted - final SortField[] sortFields; - final String collapseField; - final Object[] collapseValues; - final int numberOfCompletionsSuggestions; - - SortedTopDocs( - ScoreDoc[] scoreDocs, - boolean isSortedByField, - SortField[] sortFields, - String collapseField, - Object[] collapseValues, - int numberOfCompletionsSuggestions - ) { - this.scoreDocs = scoreDocs; - this.isSortedByField = isSortedByField; - this.sortFields = sortFields; - this.collapseField = collapseField; - this.collapseValues = collapseValues; - this.numberOfCompletionsSuggestions = numberOfCompletionsSuggestions; - } + SortField[] sortFields, + String collapseField, + Object[] collapseValues, + int numberOfCompletionsSuggestions + ) { + static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null, null, null, 0); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index d9ccf5b2e3d3e..c8418fa6a95c9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -43,14 +43,14 @@ /** * A request to execute search against one or more indices (or all). 
Best created using - * {@link org.elasticsearch.client.Requests#searchRequest(String...)}. + * {@link org.elasticsearch.client.internal.Requests#searchRequest(String...)}. *

    * Note, the search {@link #source(org.elasticsearch.search.builder.SearchSourceBuilder)} * is required. The search source is the different search options, including aggregations and such. *

    * - * @see org.elasticsearch.client.Requests#searchRequest(String...) - * @see org.elasticsearch.client.Client#search(SearchRequest) + * @see org.elasticsearch.client.internal.Requests#searchRequest(String...) + * @see org.elasticsearch.client.internal.Client#search(SearchRequest) * @see SearchResponse */ public class SearchRequest extends ActionRequest implements IndicesRequest.Replaceable, Rewriteable { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 7e80e02ccb2a4..b8ca2898e3e71 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 3b2caa7bdb989..c4d4b98e96ef1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -24,7 +24,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; @@ -69,7 +69,7 @@ final class SearchResponseMerger { final int size; final int trackTotalHitsUpTo; private final SearchTimeProvider searchTimeProvider; - private final InternalAggregation.ReduceContextBuilder aggReduceContextBuilder; + private final AggregationReduceContext.Builder aggReduceContextBuilder; private final List searchResponses = new CopyOnWriteArrayList<>(); SearchResponseMerger( @@ -77,7 +77,7 @@ final class SearchResponseMerger { int size, int trackTotalHitsUpTo, SearchTimeProvider searchTimeProvider, - InternalAggregation.ReduceContextBuilder aggReduceContextBuilder + AggregationReduceContext.Builder aggReduceContextBuilder ) { this.from = from; this.size = size; @@ -264,8 +264,7 @@ private ShardId extractShardId(ShardSearchFailure failure) { return shard.getShardId(); } Throwable cause = failure.getCause(); - if (cause instanceof ElasticsearchException) { - ElasticsearchException e = (ElasticsearchException) cause; + if (cause instanceof ElasticsearchException e) { return e.getShardId(); } return null; @@ -335,8 +334,7 @@ private static void setSuggestShardIndex( assignShardIndex(shards); for (List> suggestions : groupedSuggestions.values()) { for (Suggest.Suggestion suggestion : suggestions) { - if (suggestion instanceof CompletionSuggestion) { - CompletionSuggestion completionSuggestion = (CompletionSuggestion) suggestion; + if (suggestion instanceof CompletionSuggestion completionSuggestion) { for (CompletionSuggestion.Entry options : completionSuggestion) { 
for (CompletionSuggestion.Entry.Option option : options) { SearchShardTarget shard = option.getHit().getShard(); @@ -375,8 +373,7 @@ private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topD Object[] groupValues = null; if (topDocs instanceof TopFieldDocs) { sortFields = ((TopFieldDocs) topDocs).fields; - if (topDocs instanceof TopFieldGroups) { - TopFieldGroups topFieldGroups = (TopFieldGroups) topDocs; + if (topDocs instanceof TopFieldGroups topFieldGroups) { groupField = topFieldGroups.field; groupValues = topFieldGroups.groupValues; } @@ -401,31 +398,9 @@ private static final class FieldDocAndSearchHit extends FieldDoc { * make their ShardIds different, which is not the case if the index is really the same one from the same cluster, in which case we * need to look at the cluster alias and make sure to assign a different shardIndex based on that. */ - private static final class ShardIdAndClusterAlias implements Comparable { - private final ShardId shardId; - private final String clusterAlias; - - ShardIdAndClusterAlias(ShardId shardId, String clusterAlias) { - this.shardId = shardId; + private record ShardIdAndClusterAlias(ShardId shardId, String clusterAlias) implements Comparable { + private ShardIdAndClusterAlias { assert clusterAlias != null : "clusterAlias is null"; - this.clusterAlias = clusterAlias; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ShardIdAndClusterAlias that = (ShardIdAndClusterAlias) o; - return shardId.equals(that.shardId) && clusterAlias.equals(that.clusterAlias); - } - - @Override - public int hashCode() { - return Objects.hash(shardId, clusterAlias); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index ffcec31ff340d..603264978af7b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -70,7 +70,7 @@ public void run() { final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedScrollQueryPhase( queryResults.asList() ); - ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; + ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); if (scoreDocs.length == 0) { sendResponse(reducedQueryPhase, fetchResults); return; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java index 2720d2b297e18..e8348d189fcbc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.Scroll; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShard.java b/server/src/main/java/org/elasticsearch/action/search/SearchShard.java index 8447cd12e5144..e9dc628410f95 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/SearchShard.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShard.java @@ -18,52 +18,11 @@ * A class that encapsulates the {@link ShardId} and the cluster alias * of a shard used during the search action. */ -public final class SearchShard implements Comparable { - @Nullable - private final String clusterAlias; - private final ShardId shardId; - - public SearchShard(@Nullable String clusterAlias, ShardId shardId) { - this.clusterAlias = clusterAlias; - this.shardId = shardId; - } - - /** - * Return the cluster alias if we are executing a cross cluster search request, null otherwise. - */ - @Nullable - public String getClusterAlias() { - return clusterAlias; - } - - /** - * Return the {@link ShardId} of this shard. - */ - public ShardId getShardId() { - return shardId; - } +public record SearchShard(@Nullable String clusterAlias, ShardId shardId) implements Comparable { @Override public int compareTo(SearchShard o) { int cmp = Objects.compare(clusterAlias, o.clusterAlias, Comparator.nullsFirst(Comparator.naturalOrder())); return cmp != 0 ? cmp : shardId.compareTo(o.shardId); } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SearchShard that = (SearchShard) o; - return Objects.equals(clusterAlias, that.clusterAlias) && shardId.equals(that.shardId); - } - - @Override - public int hashCode() { - return Objects.hash(clusterAlias, shardId); - } - - @Override - public String toString() { - return "SearchShard{" + "clusterAlias='" + clusterAlias + '\'' + ", shardId=" + shardId + '}'; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 00c74fcad4d26..b7a73e5e41d01 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -17,8 +17,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.OriginSettingClient; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -668,7 +668,7 @@ private boolean assertNodePresent() { } public void cancelSearchTask(SearchTask task, String reason) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(new TaskId(client.getLocalNodeId(), task.getId())) + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(new TaskId(client.getLocalNodeId(), task.getId())) .setReason("Fatal failure during search: " + reason); // force the origin to execute the cancellation as a system user new OriginSettingClient(client, GetTaskAction.TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {})); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index a5c6c0f5d324a..648f8e6c8ee05 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.service.ClusterService; @@ -191,14 +191,7 @@ private long buildTookInMillis() { }); } - static final class SearchRequestSlot { + record SearchRequestSlot(SearchRequest request, int responseSlot) { - final SearchRequest request; - final int responseSlot; - - SearchRequestSlot(SearchRequest request, int responseSlot) { - this.request = request; - this.responseSlot = responseSlot; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index fb04afe0b268d..549506bc56d8b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -40,19 +40,22 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; @@ -246,11 +249,7 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * to moving backwards due to NTP and other such complexities, etc.). There are also issues with * using a relative clock for reporting real time. Thus, we simply separate these two uses. 
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index fb04afe0b268d..549506bc56d8b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -40,19 +40,22 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; @@ -246,11 +249,7 @@ private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, Clust * to moving backwards due to NTP and other such complexities, etc.). There are also issues with * using a relative clock for reporting real time. Thus, we simply separate these two uses. */ - static final class SearchTimeProvider { - - private final long absoluteStartMillis; - private final long relativeStartNanos; - private final LongSupplier relativeCurrentNanosProvider; + record SearchTimeProvider(long absoluteStartMillis, long relativeStartNanos, LongSupplier relativeCurrentNanosProvider) { /** * Instantiates a new search time provider. The absolute start time is the real clock time @@ -259,19 +258,11 @@ static final class SearchTimeProvider { * operation took can be measured against the provided relative clock and the relative start * time. * - * @param absoluteStartMillis the absolute start time in milliseconds since the epoch - * @param relativeStartNanos the relative start time in nanoseconds + * @param absoluteStartMillis the absolute start time in milliseconds since the epoch + * @param relativeStartNanos the relative start time in nanoseconds * @param relativeCurrentNanosProvider provides the current relative time */ - SearchTimeProvider(final long absoluteStartMillis, final long relativeStartNanos, final LongSupplier relativeCurrentNanosProvider) { - this.absoluteStartMillis = absoluteStartMillis; - this.relativeStartNanos = relativeStartNanos; - this.relativeCurrentNanosProvider = relativeCurrentNanosProvider; - } - - long getAbsoluteStartMillis() { - return absoluteStartMillis; - } + SearchTimeProvider {} long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos);
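// [Editor's illustration, not part of the patch] `SearchTimeProvider {}` above is a compact
// canonical constructor: the parameter list and field assignments are implicit, and the empty
// body exists only so the constructor javadoc has somewhere to live. A non-empty compact body
// runs before the fields are assigned, which is the idiomatic place for validation. Record
// accessors also drop the get- prefix, which is why call sites in this patch move from
// getAbsoluteStartMillis() to absoluteStartMillis(). A sketch with a hypothetical Interval:
record Interval(long startMillis, long endMillis) {
    Interval {
        // compact form: validates the parameters before they are assigned to the fields
        if (endMillis < startMillis) {
            throw new IllegalArgumentException("end before start");
        }
    }

    long durationMillis() {
        return endMillis - startMillis;
    }

    public static void main(String[] args) {
        Interval i = new Interval(100L, 250L);
        System.out.println(i.startMillis() + ".." + i.endMillis() + " lasted " + i.durationMillis() + "ms");
    }
}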
@@ -351,7 +342,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results @Override public void run() { final AtomicArray<SearchPhaseResult> atomicArray = results.getAtomicArray(); - sendSearchResponse(InternalSearchResponse.empty(), atomicArray); + sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, atomicArray); } }; } @@ -402,7 +393,7 @@ private void executeRequest( ); } else { if (shouldMinimizeRoundtrips(rewritten)) { - final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).getTaskId(); + final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).taskId(); ccsRemoteReduce( parentTaskId, rewritten, @@ -478,7 +469,7 @@ private void executeRequest( } } }, listener::onFailure); - Rewriteable.rewriteAndFetch(original, searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), rewriteListener); + Rewriteable.rewriteAndFetch(original, searchService.getRewriteContext(timeProvider::absoluteStartMillis), rewriteListener); } static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { @@ -507,7 +498,7 @@ static void ccsRemoteReduce( OriginalIndices localIndices, Map<String, OriginalIndices> remoteIndices, SearchTimeProvider timeProvider, - InternalAggregation.ReduceContextBuilder aggReduceContextBuilder, + AggregationReduceContext.Builder aggReduceContextBuilder, RemoteClusterService remoteClusterService, ThreadPool threadPool, ActionListener<SearchResponse> listener, @@ -526,7 +517,7 @@ static void ccsRemoteReduce( searchRequest, indices.indices(), clusterAlias, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), true ); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); @@ -589,7 +580,7 @@ public void onFailure(Exception e) { searchRequest, indices.indices(), clusterAlias, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), false ); ActionListener<SearchResponse> ccsListener = createCCSListener( @@ -621,7 +612,7 @@ public void onFailure(Exception e) { searchRequest, localIndices.indices(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), false ); localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); @@ -632,7 +623,7 @@ public void onFailure(Exception e) { static SearchResponseMerger createSearchResponseMerger( SearchSourceBuilder source, SearchTimeProvider timeProvider, - InternalAggregation.ReduceContextBuilder aggReduceContextBuilder + AggregationReduceContext.Builder aggReduceContextBuilder ) { final int from; final int size; @@ -877,7 +868,7 @@ private Index[] resolveLocalIndices(OriginalIndices localIndices, ClusterState c } List<String> frozenIndices = null; - Index[] indices = indexNameExpressionResolver.concreteIndices(clusterState, localIndices, timeProvider.getAbsoluteStartMillis()); + Index[] indices = indexNameExpressionResolver.concreteIndices(clusterState, localIndices, timeProvider.absoluteStartMillis()); for (Index index : indices) { IndexMetadata indexMetadata = clusterState.metadata().index(index); if (indexMetadata.getSettings().getAsBoolean("index.frozen", false)) { @@ -914,6 +905,10 @@ private void executeSearch( ) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + if (searchRequest.allowPartialSearchResults() == null) { + // No user preference defined in search request - apply cluster service default + searchRequest.allowPartialSearchResults(searchService.defaultAllowPartialSearchResults()); + } // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead @@ -931,7 +926,8 @@ private void executeSearch( localIndices, searchRequest.getLocalClusterAlias(), searchContext, - searchRequest.pointInTimeBuilder().getKeepAlive() + searchRequest.pointInTimeBuilder().getKeepAlive(), + searchRequest.allowPartialSearchResults() ); } else { final Index[] indices = resolveLocalIndices(localIndices, clusterState, timeProvider); @@ -988,18 +984,13 @@ private void executeSearch( // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard searchRequest.searchType(QUERY_THEN_FETCH); } - if (searchRequest.allowPartialSearchResults() == null) { - // No user preference defined in search request - apply cluster service default - searchRequest.allowPartialSearchResults(searchService.defaultAllowPartialSearchResults()); - } if (searchRequest.isSuggestOnly()) { // disable request cache if we have only suggest searchRequest.requestCache(false); switch (searchRequest.searchType()) { - case DFS_QUERY_THEN_FETCH: + case DFS_QUERY_THEN_FETCH -> // convert to Q_T_F if we have only suggest searchRequest.searchType(QUERY_THEN_FETCH); - break; } } final DiscoveryNodes nodes = clusterState.nodes(); @@ -1177,7 +1168,7 @@ private SearchPhase searchAsyncAction( return action; }, clusters, - searchService.getCoordinatorRewriteContextProvider(timeProvider::getAbsoluteStartMillis) + searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis) ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1189,49 +1180,42 @@ private SearchPhase searchAsyncAction( shardIterators.size(), exc -> searchTransportService.cancelSearchTask(task, "failed to merge result [" + exc.getMessage() + "]") ); - AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction; - switch (searchRequest.searchType()) { - case DFS_QUERY_THEN_FETCH: -
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction( - logger, - searchTransportService, - connectionLookup, - aliasFilter, - concreteIndexBoosts, - searchPhaseController, - executor, - queryResultConsumer, - searchRequest, - listener, - shardIterators, - timeProvider, - clusterState, - task, - clusters - ); - break; - case QUERY_THEN_FETCH: - searchAsyncAction = new SearchQueryThenFetchAsyncAction( - logger, - searchTransportService, - connectionLookup, - aliasFilter, - concreteIndexBoosts, - searchPhaseController, - executor, - queryResultConsumer, - searchRequest, - listener, - shardIterators, - timeProvider, - clusterState, - task, - clusters - ); - break; - default: - throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); - } + AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction = switch (searchRequest.searchType()) { + case DFS_QUERY_THEN_FETCH -> new SearchDfsQueryThenFetchAsyncAction( + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + searchPhaseController, + executor, + queryResultConsumer, + searchRequest, + listener, + shardIterators, + timeProvider, + clusterState, + task, + clusters + ); + case QUERY_THEN_FETCH -> new SearchQueryThenFetchAsyncAction( + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + searchPhaseController, + executor, + queryResultConsumer, + searchRequest, + listener, + shardIterators, + timeProvider, + clusterState, + task, + clusters + ); + }; return searchAsyncAction; } }
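// [Editor's illustration, not part of the patch] The hunk above replaces a statement switch
// (mutable local, a break per arm, a default that throws IllegalStateException) with a switch
// expression. Over an enum the expression must be exhaustive, so once every constant is
// covered the throwing default can simply be deleted and the compiler enforces coverage.
// A sketch with a hypothetical enum, not the real SearchType:
final class PhaseSelectorDemo {
    enum SearchType { DFS_QUERY_THEN_FETCH, QUERY_THEN_FETCH }

    static String phaseName(SearchType type) {
        // no default arm: the switch fails to compile if a constant is left out
        return switch (type) {
            case DFS_QUERY_THEN_FETCH -> "dfs_query_then_fetch";
            case QUERY_THEN_FETCH -> "query_then_fetch";
        };
    }

    public static void main(String[] args) {
        System.out.println(phaseName(SearchType.QUERY_THEN_FETCH));
    }
}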
@@ -1243,7 +1227,7 @@ private static void validateAndResolveWaitForCheckpoint( String[] concreteLocalIndices ) { HashSet<String> searchedIndices = new HashSet<>(Arrays.asList(concreteLocalIndices)); - HashMap<String, long[]> newWaitForCheckpoints = new HashMap<>(searchRequest.getWaitForCheckpoints().size()); + Map<String, long[]> newWaitForCheckpoints = Maps.newMapWithExpectedSize(searchRequest.getWaitForCheckpoints().size()); for (Map.Entry<String, long[]> waitForCheckpointIndex : searchRequest.getWaitForCheckpoints().entrySet()) { long[] checkpoints = waitForCheckpointIndex.getValue(); int checkpointsProvided = checkpoints.length; @@ -1413,22 +1397,35 @@ static List<SearchShardIterator> getLocalLocalShardsIteratorFromPointInTime( OriginalIndices originalIndices, String localClusterAlias, SearchContextId searchContext, - TimeValue keepAlive + TimeValue keepAlive, + boolean allowPartialSearchResults ) { final List<SearchShardIterator> iterators = new ArrayList<>(searchContext.shards().size()); for (Map.Entry<ShardId, SearchContextIdForNode> entry : searchContext.shards().entrySet()) { final SearchContextIdForNode perNode = entry.getValue(); if (Strings.isEmpty(perNode.getClusterAlias())) { final ShardId shardId = entry.getKey(); - final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); - final List<String> targetNodes = new ArrayList<>(shards.size()); - targetNodes.add(perNode.getNode()); - if (perNode.getSearchContextId().getSearcherId() != null) { - for (ShardRouting shard : shards) { - if (shard.currentNodeId().equals(perNode.getNode()) == false) { - targetNodes.add(shard.currentNodeId()); + final List<String> targetNodes = new ArrayList<>(2); + // Prefer executing shard requests on nodes that are part of PIT first. + if (clusterState.nodes().nodeExists(perNode.getNode())) { + targetNodes.add(perNode.getNode()); + } + try { + final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); + if (perNode.getSearchContextId().getSearcherId() != null) { + for (ShardRouting shard : shards) { + if (shard.currentNodeId().equals(perNode.getNode()) == false) { + targetNodes.add(shard.currentNodeId()); + } } } + } catch (IndexNotFoundException | ShardNotFoundException e) { + // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on + // this coordinating node is outdated. It's fine to ignore these extra "retry-able" target shards + // when allowPartialSearchResults is false + if (allowPartialSearchResults == false) { + throw e; + } } OriginalIndices finalIndices = new OriginalIndices( new String[] { shardId.getIndexName() },
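// [Editor's illustration, not part of the patch] The point-in-time change above adds a
// swallow-or-rethrow guard: per-shard resolution failures are dropped when partial results
// are acceptable and propagated when they are not. A self-contained sketch of that guard;
// ShardMissingException and the "stale-" naming are stand-ins for
// IndexNotFoundException/ShardNotFoundException and a deleted index:
import java.util.ArrayList;
import java.util.List;

final class LenientResolutionDemo {
    static class ShardMissingException extends RuntimeException {
        ShardMissingException(String message) { super(message); }
    }

    static List<String> resolve(List<String> shardIds, boolean allowPartialSearchResults) {
        List<String> resolved = new ArrayList<>();
        for (String shardId : shardIds) {
            try {
                if (shardId.startsWith("stale-")) {
                    throw new ShardMissingException("unknown shard " + shardId);
                }
                resolved.add(shardId);
            } catch (ShardMissingException e) {
                if (allowPartialSearchResults == false) {
                    throw e; // a complete answer was requested, so surface the failure
                }
                // partial results allowed: skip the unresolvable shard and carry on
            }
        }
        return resolved;
    }

    public static void main(String[] args) {
        System.out.println(resolve(List.of("shard-0", "stale-1"), true)); // prints [shard-0]
    }
}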
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 0f8d49e3f8d39..537e98693e831 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -45,10 +45,19 @@ public TransportSearchScrollAction( protected void doExecute(Task task, SearchScrollRequest request, ActionListener<SearchResponse> listener) { try { ParsedScrollId scrollId = parseScrollId(request.scrollId()); - Runnable action; - switch (scrollId.getType()) { - case QUERY_THEN_FETCH_TYPE: - action = new SearchScrollQueryThenFetchAsyncAction( + Runnable action = switch (scrollId.getType()) { + case QUERY_THEN_FETCH_TYPE -> new SearchScrollQueryThenFetchAsyncAction( + logger, + clusterService, + searchTransportService, + searchPhaseController, + request, + (SearchTask) task, + scrollId, + listener + ); + case QUERY_AND_FETCH_TYPE -> // TODO can we get rid of this? + new SearchScrollQueryAndFetchAsyncAction( logger, clusterService, searchTransportService, @@ -58,22 +67,8 @@ protected void doExecute(Task task, SearchScrollRequest request, ActionListener< scrollId, listener ); - break; - case QUERY_AND_FETCH_TYPE: // TODO can we get rid of this? - action = new SearchScrollQueryAndFetchAsyncAction( - logger, - clusterService, - searchTransportService, - searchPhaseController, - request, - (SearchTask) task, - scrollId, - listener - ); - break; - default: - throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized"); - } + default -> throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized"); + }; action.run(); } catch (Exception e) { listener.onFailure(e);
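// [Editor's illustration, not part of the patch] Unlike the exhaustive enum switch earlier,
// the scroll-type switch above keeps a throwing default arm: ParsedScrollId's type is a plain
// string constant, so the compiler cannot prove the switch exhaustive. The expression form
// still removes the mutable `action` local and the break statements. A sketch with
// hypothetical type names:
final class ScrollDispatchDemo {
    static Runnable actionFor(String scrollType) {
        return switch (scrollType) {
            case "queryThenFetch" -> () -> System.out.println("running query-then-fetch scroll");
            case "queryAndFetch" -> () -> System.out.println("running query-and-fetch scroll");
            default -> throw new IllegalArgumentException("Scroll id type [" + scrollType + "] unrecognized");
        };
    }

    public static void main(String[] args) {
        actionFor("queryThenFetch").run();
    }
}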
diff --git a/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java b/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java index 6635604ba29cd..e58bec22ee9b8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java +++ b/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.support; -import com.carrotsearch.hppc.cursors.IntObjectCursor; - import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -19,6 +17,7 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; @@ -26,7 +25,7 @@ * A class whose instances represent a value for counting the number * of active shard copies for a given shard in an index. */ -public final class ActiveShardCount implements Writeable { +public record ActiveShardCount(int value) implements Writeable { private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2; private static final int ALL_ACTIVE_SHARDS = -1; @@ -36,12 +35,6 @@ public final class ActiveShardCount implements Writeable { public static final ActiveShardCount NONE = new ActiveShardCount(0); public static final ActiveShardCount ONE = new ActiveShardCount(1); - private final int value; - - private ActiveShardCount(final int value) { - this.value = value; - } - /** * Get an ActiveShardCount instance for the given value. The value is first validated to ensure * it is a valid shard count and throws an IllegalArgumentException if validation fails. Valid @@ -160,8 +153,8 @@ public boolean enoughShardsActive(final ClusterState clusterState, final String. if (waitForActiveShards == ActiveShardCount.DEFAULT) { waitForActiveShards = SETTING_WAIT_FOR_ACTIVE_SHARDS.get(indexMetadata.getSettings()); } - for (final IntObjectCursor<IndexShardRoutingTable> shardRouting : indexRoutingTable.getShards()) { - if (waitForActiveShards.enoughShardsActive(shardRouting.value) == false) { + for (final Map.Entry<Integer, IndexShardRoutingTable> shardRouting : indexRoutingTable.getShards().entrySet()) { + if (waitForActiveShards.enoughShardsActive(shardRouting.getValue()) == false) { // not enough active shard copies yet return false; } @@ -188,33 +181,13 @@ public boolean enoughShardsActive(final IndexShardRoutingTable shardRoutingTable } } - @Override - public int hashCode() { - return Integer.hashCode(value); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ActiveShardCount that = (ActiveShardCount) o; - return value == that.value; - } - @Override public String toString() { - switch (value) { - case ALL_ACTIVE_SHARDS: - return "ALL"; - case ACTIVE_SHARD_COUNT_DEFAULT: - return "DEFAULT"; - default: - return Integer.toString(value); - } + return switch (value) { + case ALL_ACTIVE_SHARDS -> "ALL"; + case ACTIVE_SHARD_COUNT_DEFAULT -> "DEFAULT"; + default -> Integer.toString(value); + }; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java index 6e04e65f04434..8edc7e591bbe6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -8,6 +8,9 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; @@ -17,6 +20,8 @@ public final class ChannelActionListener<Response extends TransportResponse, Request extends TransportRequest> implements ActionListener<Response> { + private static final Logger logger = LogManager.getLogger(); + private final TransportChannel channel; private final Request request; private final String actionName; @@ -38,7 +43,15 @@ public void onResponse(Response response) { @Override public void onFailure(Exception e) { - TransportChannel.sendErrorResponse(channel, actionName, request, e); + try { + channel.sendResponse(e); + } catch (Exception sendException) { + sendException.addSuppressed(e); + logger.warn( + () -> new ParameterizedMessage("Failed to send error response for action [{}] and request [{}]", actionName, request), + sendException + ); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index b7048cf243585..921bab2832c2b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -34,7 +34,7 @@ * Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded * to actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices. */ -public class IndicesOptions implements ToXContentFragment { +public record IndicesOptions(EnumSet