diff --git a/.buildkite/scripts/get-latest-test-mutes.sh b/.buildkite/scripts/get-latest-test-mutes.sh index 5721e29f1b773..1dafcebec24b1 100755 --- a/.buildkite/scripts/get-latest-test-mutes.sh +++ b/.buildkite/scripts/get-latest-test-mutes.sh @@ -1,6 +1,6 @@ #!/bin/bash -if [[ ! "${BUILDKITE_PULL_REQUEST:-}" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then +if [[ "${BUILDKITE_PULL_REQUEST:-false}" == "false" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then exit 0 fi diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 25cfae6c9803a..632bae64389a3 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -1,4 +1,5 @@ import org.elasticsearch.gradle.internal.test.TestUtil +import org.elasticsearch.gradle.OS /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one @@ -77,7 +78,7 @@ tasks.register("copyPainless", Copy) { } tasks.named("run").configure { - executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '') args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index" dependsOn "copyExpression", "copyPainless", configurations.nativeLib systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString()) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index 6dfb337a22ac4..24ba0740cfe26 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -22,7 +22,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off - WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:eef54b3a414aa53b98f0f8df2633aed83c3ba6230722769282925442968f0364", + WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:dd66beec64a7f9b19c6c35a1195153b2b630a55e16ec71949ed5187c5947eea1", "-wolfi", "apk" ), diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index ab28a66d93065..fac7d86701d5b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -122,7 +122,7 @@ public void apply(Project project) { composeExtension.getRemoveContainers().set(true); composeExtension.getCaptureContainersOutput() .set(EnumSet.of(LogLevel.INFO, LogLevel.DEBUG).contains(project.getGradle().getStartParameter().getLogLevel())); - composeExtension.getUseDockerComposeV2().set(false); + composeExtension.getUseDockerComposeV2().set(true); composeExtension.getExecutable().set(this.providerFactory.provider(() -> { String composePath = dockerSupport.get().getDockerAvailability().dockerComposePath(); LOGGER.debug("Docker Compose path: {}", composePath); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 9e5fc1f09ac9e..2e313fa73c4ee 100644 --- 
a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -26,7 +26,7 @@ public interface TestClustersAware extends Task { Collection getClusters(); @ServiceReference(REGISTRY_SERVICE_NAME) - Property getRegistery(); + Property getRegistry(); @ServiceReference(TEST_CLUSTER_TASKS_SERVICE) Property getTasksService(); @@ -47,6 +47,14 @@ default void useCluster(ElasticsearchCluster cluster) { getClusters().add(cluster); } + default Provider getClusterInfo(String clusterName) { + return getProject().getProviders().of(TestClusterValueSource.class, source -> { + source.getParameters().getService().set(getRegistry()); + source.getParameters().getClusterName().set(clusterName); + source.getParameters().getPath().set(getProject().getIsolated().getPath()); + }); + } + default void useCluster(Provider cluster) { useCluster(cluster.get()); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index ada31bc11a653..c3dc49a2683f2 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -249,7 +249,7 @@ private void configureStartClustersHook(Gradle gradle) { .forEach(awareTask -> { awareTask.doFirst(task -> { awareTask.beforeStart(); - awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster); + awareTask.getClusters().forEach(awareTask.getRegistry().get()::maybeStartCluster); }); }); }); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index 8d2a9217e7d0c..dcfe7c29a52be 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -109,6 +109,23 @@ public void restart(String path, String clusterName) { cluster.restart(); } + public void nextNodeToNextVersion(Provider cluster) { + nextNodeToNextVersion(cluster.get()); + } + + public void nextNodeToNextVersion(ElasticsearchCluster cluster) { + nextNodeToNextVersion(cluster.getPath(), cluster.getName()); + } + + public void nextNodeToNextVersion(String path, String clusterName) { + ElasticsearchCluster cluster = runningClusters.stream() + .filter(c -> c.getPath().equals(path)) + .filter(c -> c.getName().equals(clusterName)) + .findFirst() + .orElseThrow(); + cluster.nextNodeToNextVersion(); + } + public void storeProcess(String id, Process esProcess) { nodeProcesses.put(id, esProcess); } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index 1e57d9fab7cfd..c3b9768946767 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -187,12 +187,20 @@ static String agentCommandLineOption(Path agentJar, Path tmpPropertiesFile) { static void extractSecureSettings(SecureSettings secrets, Map propertiesMap) { final Set settingNames = secrets.getSettingNames(); for 
(String key : List.of("api_key", "secret_token")) { - String prefix = "telemetry."; - if (settingNames.contains(prefix + key)) { - try (SecureString token = secrets.getString(prefix + key)) { - propertiesMap.put(key, token.toString()); + for (String prefix : List.of("telemetry.", "tracing.apm.")) { + if (settingNames.contains(prefix + key)) { + if (propertiesMap.containsKey(key)) { + throw new IllegalStateException( + Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key) + ); + } + + try (SecureString token = secrets.getString(prefix + key)) { + propertiesMap.put(key, token.toString()); + } } } + } } @@ -219,12 +227,44 @@ private static Map extractDynamicSettings(Map pr static Map extractApmSettings(Settings settings) throws UserException { final Map propertiesMap = new HashMap<>(); + // tracing.apm.agent. is deprecated by telemetry.agent. final String telemetryAgentPrefix = "telemetry.agent."; + final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent."; final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix); telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key)))); + final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix); + for (String key : apmAgentSettings.keySet()) { + if (propertiesMap.containsKey(key)) { + throw new IllegalStateException( + Strings.format( + "Duplicate telemetry setting: [%s%s] and [%s%s]", + telemetryAgentPrefix, + key, + deprecatedTelemetryAgentPrefix, + key + ) + ); + } + propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key))); + } + StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings); + if (globalLabels.length() == 0) { + globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings); + } else { + StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings); + if (tracingGlobalLabels.length() != 0) { + throw new IllegalArgumentException( + "Cannot have global labels with tracing.agent prefix [" + + globalLabels + + "] and telemetry.apm.agent prefix [" + + tracingGlobalLabels + + "]" + ); + } + } if (globalLabels.length() > 0) { propertiesMap.put("global_labels", globalLabels.toString()); } @@ -234,7 +274,7 @@ static Map extractApmSettings(Settings settings) throws UserExce if (propertiesMap.containsKey(key)) { throw new UserException( ExitCodes.CONFIG, - "Do not set a value for [telemetry.agent." + key + "], as this is configured automatically by Elasticsearch" + "Do not set a value for [tracing.apm.agent." 
+ key + "], as this is configured automatically by Elasticsearch" ); } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java index 0e067afc1aa73..a7ba8eb11fbcc 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java @@ -25,15 +25,18 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -79,63 +82,109 @@ public void testFileDeleteWorks() throws IOException { } public void testExtractSecureSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("telemetry.secret_token", "token"); - secureSettings.setString("telemetry.api_key", "key"); + MockSecureSettings duplicateSecureSettings = new MockSecureSettings(); - Map propertiesMap = new HashMap<>(); - APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap); + for (String prefix : List.of("telemetry.", "tracing.apm.")) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(prefix + "secret_token", "token"); + secureSettings.setString(prefix + "api_key", "key"); - assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key"))); - } + duplicateSecureSettings.setString(prefix + "api_key", "secret"); - public void testExtractSettings() throws UserException { - Settings defaults = Settings.builder() - .put("telemetry.agent.server_url", "https://myurl:443") - .put("telemetry.agent.service_node_name", "instance-0000000001") - .build(); + Map propertiesMap = new HashMap<>(); + APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap); + + assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key"))); + } - var name = "APM Tracing"; - var deploy = "123"; - var org = "456"; - var extracted = APMJvmOptions.extractApmSettings( - Settings.builder() - .put(defaults) - .put("telemetry.agent.global_labels.deployment_name", name) - .put("telemetry.agent.global_labels.deployment_id", deploy) - .put("telemetry.agent.global_labels.organization_id", org) - .build() + Exception exception = expectThrows( + IllegalStateException.class, + () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>()) ); + assertThat(exception.getMessage(), containsString("Duplicate telemetry setting")); + assertThat(exception.getMessage(), containsString("telemetry.api_key")); + assertThat(exception.getMessage(), containsString("tracing.apm.api_key")); - assertThat( - extracted, - allOf( - hasEntry("server_url", "https://myurl:443"), - hasEntry("service_node_name", "instance-0000000001"), - hasEntry(equalTo("global_labels"), not(endsWith(","))), // test 
that we have collapsed all global labels into one - not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys + } + + public void testExtractSettings() throws UserException { + Function buildSettings = (prefix) -> Settings.builder() + .put(prefix + "server_url", "https://myurl:443") + .put(prefix + "service_node_name", "instance-0000000001"); + + for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) { + var name = "APM Tracing"; + var deploy = "123"; + var org = "456"; + var extracted = APMJvmOptions.extractApmSettings( + buildSettings.apply(prefix) + .put(prefix + "global_labels.deployment_name", name) + .put(prefix + "global_labels.deployment_id", deploy) + .put(prefix + "global_labels.organization_id", org) + .build() + ); + + assertThat( + extracted, + allOf( + hasEntry("server_url", "https://myurl:443"), + hasEntry("service_node_name", "instance-0000000001"), + hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one + not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys + ) + ); + + List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); + assertThat(labels, hasSize(3)); + assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy)); + + // test replacing with underscores and skipping empty + name = "APM=Tracing"; + deploy = ""; + org = ",456"; + extracted = APMJvmOptions.extractApmSettings( + buildSettings.apply(prefix) + .put(prefix + "global_labels.deployment_name", name) + .put(prefix + "global_labels.deployment_id", deploy) + .put(prefix + "global_labels.organization_id", org) + .build() + ); + labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); + assertThat(labels, hasSize(2)); + assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456")); + } + + IllegalStateException err = expectThrows( + IllegalStateException.class, + () -> APMJvmOptions.extractApmSettings( + Settings.builder() + .put("tracing.apm.agent.server_url", "https://myurl:443") + .put("telemetry.agent.server_url", "https://myurl-2:443") + .build() ) ); + assertThat(err.getMessage(), is("Duplicate telemetry setting: [telemetry.agent.server_url] and [tracing.apm.agent.server_url]")); + } + + public void testNoMixedLabels() { + String telemetryAgent = "telemetry.agent."; + String tracingAgent = "tracing.apm.agent."; + Settings settings = Settings.builder() + .put("tracing.apm.enabled", true) + .put(telemetryAgent + "server_url", "https://myurl:443") + .put(telemetryAgent + "service_node_name", "instance-0000000001") + .put(tracingAgent + "global_labels.deployment_id", "123") + .put(telemetryAgent + "global_labels.organization_id", "456") + .build(); - List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); - assertThat(labels, hasSize(3)); - assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy)); - - // test replacing with underscores and skipping empty - name = "APM=Tracing"; - deploy = ""; - org = ",456"; - extracted = APMJvmOptions.extractApmSettings( - Settings.builder() - .put(defaults) - .put("telemetry.agent.global_labels.deployment_name", name) - .put("telemetry.agent.global_labels.deployment_id", deploy) - .put("telemetry.agent.global_labels.organization_id", org) - .build() + IllegalArgumentException err = 
assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings)); + assertThat( + err.getMessage(), + is( + "Cannot have global labels with tracing.agent prefix [organization_id=456] and" + + " telemetry.apm.agent prefix [deployment_id=123]" + ) ); - labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); - assertThat(labels, hasSize(2)); - assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456")); } private Path makeFakeAgentJar() throws IOException { diff --git a/docs/changelog/118188.yaml b/docs/changelog/118188.yaml new file mode 100644 index 0000000000000..f24651231b7a0 --- /dev/null +++ b/docs/changelog/118188.yaml @@ -0,0 +1,5 @@ +pr: 118188 +summary: Check for early termination in Driver +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119575.yaml b/docs/changelog/119575.yaml new file mode 100644 index 0000000000000..daa7e69118acc --- /dev/null +++ b/docs/changelog/119575.yaml @@ -0,0 +1,6 @@ +pr: 119575 +summary: Fix realtime get of nested fields with synthetic source +area: Mapping +type: bug +issues: + - 119553 diff --git a/docs/changelog/119679.yaml b/docs/changelog/119679.yaml new file mode 100644 index 0000000000000..a3fb36bcd01c3 --- /dev/null +++ b/docs/changelog/119679.yaml @@ -0,0 +1,5 @@ +pr: 119679 +summary: Support mTLS for the Elastic Inference Service integration inside the inference API +area: Machine Learning +type: feature +issues: [] diff --git a/docs/changelog/119926.yaml b/docs/changelog/119926.yaml deleted file mode 100644 index 3afafd5b2117f..0000000000000 --- a/docs/changelog/119926.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 119926 -summary: "Deprecated tracing.apm.* settings got removed." -area: Infra/Metrics -type: breaking -issues: [] -breaking: - title: "Deprecated tracing.apm.* settings got removed." - area: Cluster and node setting - details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead. - impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present. - notable: false diff --git a/docs/changelog/120020.yaml b/docs/changelog/120020.yaml new file mode 100644 index 0000000000000..55a80187dbff4 --- /dev/null +++ b/docs/changelog/120020.yaml @@ -0,0 +1,5 @@ +pr: 120020 +summary: Resume Driver on cancelled or early finished +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/120055.yaml b/docs/changelog/120055.yaml new file mode 100644 index 0000000000000..05f66523d0ef8 --- /dev/null +++ b/docs/changelog/120055.yaml @@ -0,0 +1,5 @@ +pr: 120055 +summary: Optimize loading mappings when determining synthetic source usage and whether host.name can be sorted on. 
+area: Logs +type: enhancement +issues: [] diff --git a/docs/changelog/120143.yaml b/docs/changelog/120143.yaml new file mode 100644 index 0000000000000..7e8cd5a8ceaeb --- /dev/null +++ b/docs/changelog/120143.yaml @@ -0,0 +1,6 @@ +pr: 120143 +summary: Esql - support date nanos in date format function +area: ES|QL +type: enhancement +issues: + - 109994 diff --git a/docs/changelog/120193.yaml b/docs/changelog/120193.yaml new file mode 100644 index 0000000000000..18858e81d9b6c --- /dev/null +++ b/docs/changelog/120193.yaml @@ -0,0 +1,5 @@ +pr: 120193 +summary: "Do not capture `ClusterChangedEvent` in `IndicesStore` call to #onClusterStateShardsClosed" +area: Store +type: bug +issues: [] diff --git a/docs/changelog/120198.yaml b/docs/changelog/120198.yaml new file mode 100644 index 0000000000000..076a2be942a36 --- /dev/null +++ b/docs/changelog/120198.yaml @@ -0,0 +1,5 @@ +pr: 120198 +summary: Bump `TrialLicenseVersion` to allow starting new trial on 9.0 +area: License +type: enhancement +issues: [] diff --git a/docs/changelog/120200.yaml b/docs/changelog/120200.yaml new file mode 100644 index 0000000000000..abde91aec0dff --- /dev/null +++ b/docs/changelog/120200.yaml @@ -0,0 +1,5 @@ +pr: 120200 +summary: "[Connector API] Support hard deletes with new URL param in delete endpoint" +area: Extract&Transform +type: feature +issues: [] diff --git a/docs/changelog/120207.yaml b/docs/changelog/120207.yaml new file mode 100644 index 0000000000000..c01dfc6aecf78 --- /dev/null +++ b/docs/changelog/120207.yaml @@ -0,0 +1,5 @@ +pr: 120207 +summary: Make `requests_per_second` configurable to throttle reindexing +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/120231.yaml b/docs/changelog/120231.yaml new file mode 100644 index 0000000000000..58fba0256c54d --- /dev/null +++ b/docs/changelog/120231.yaml @@ -0,0 +1,5 @@ +pr: 120231 +summary: Add sanity check to `ReindexDatastreamIndexAction` +area: Data streams +type: enhancement +issues: [] diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index f161a3c3b5933..a324630cc8a52 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -13,7 +13,7 @@ beta::[] For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. -- -Soft-deletes a connector and removes associated sync jobs. +Deletes a connector and optionally removes associated sync jobs. Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually. @@ -37,6 +37,9 @@ To get started with Connector APIs, check out <`:: (Required, string) +``:: +(Optional, boolean) If `true`, the connector doc is deleted. If `false`, connector doc is marked as deleted (soft deletion). Defaults to `false`. + `delete_sync_jobs`:: (Optional, boolean) A flag indicating if associated sync jobs should be also removed. Defaults to `false`. 
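Annotation: the connector delete endpoint documented above now distinguishes the default soft delete (the connector doc is only marked deleted) from a hard delete controlled by a new boolean URL parameter. Below is a minimal sketch of calling it with the low-level Java REST client; the parameter name `hard` is an assumption taken from the changelog entry (the doc above elides the name), and the host and connector id are placeholders.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ConnectorHardDeleteExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // DELETE _connector/<connector_id>?hard=true&delete_sync_jobs=true
            Request request = new Request("DELETE", "/_connector/my-connector");
            request.addParameter("hard", "true");             // assumed param name; defaults to false (soft delete)
            request.addParameter("delete_sync_jobs", "true"); // also remove associated sync jobs
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
```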
diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json index 629415da30fa2..f6f48e9df82b0 100644 --- a/docs/reference/esql/functions/kibana/definition/date_format.json +++ b/docs/reference/esql/functions/kibana/definition/date_format.json @@ -16,6 +16,18 @@ "variadic" : false, "returnType" : "keyword" }, + { + "params" : [ + { + "name" : "dateFormat", + "type" : "date_nanos", + "optional" : true, + "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -34,6 +46,24 @@ "variadic" : false, "returnType" : "keyword" }, + { + "params" : [ + { + "name" : "dateFormat", + "type" : "keyword", + "optional" : true, + "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`." + }, + { + "name" : "date", + "type" : "date_nanos", + "optional" : false, + "description" : "Date expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -51,6 +81,24 @@ ], "variadic" : false, "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "dateFormat", + "type" : "text", + "optional" : true, + "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`." + }, + { + "name" : "date", + "type" : "date_nanos", + "optional" : false, + "description" : "Date expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/match_operator.json b/docs/reference/esql/functions/kibana/definition/match_operator.json index c8cbf1cf9d966..b58f9d5835a2d 100644 --- a/docs/reference/esql/functions/kibana/definition/match_operator.json +++ b/docs/reference/esql/functions/kibana/definition/match_operator.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", "type" : "operator", "name" : "match_operator", - "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.", + "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <> and <>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/match_operator.md b/docs/reference/esql/functions/kibana/docs/match_operator.md index 7681c2d1ce231..98f55aacde0b8 100644 --- a/docs/reference/esql/functions/kibana/docs/match_operator.md +++ b/docs/reference/esql/functions/kibana/docs/match_operator.md @@ -6,7 +6,8 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. -Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. +Match can be used on fields from the text family like <> and <>, +as well as other field types like keyword, boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. 
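Annotation: the `date_nanos` signatures added to `date_format.json` above mean `DATE_FORMAT` now accepts nanosecond-precision timestamps as well as millisecond dates. A hedged sketch of exercising this through the ES|QL `_query` endpoint with the low-level Java REST client; the index and field names are hypothetical, and `@timestamp` is assumed to be mapped as `date_nanos`.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DateFormatNanosExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_query");
            // Format a date_nanos field with DATE_FORMAT; index/field names are illustrative only.
            request.setJsonEntity("""
                {"query": "FROM logs-nanos | EVAL day = DATE_FORMAT(\\"yyyy-MM-dd\\", @timestamp) | KEEP day | LIMIT 5"}
                """);
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
```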
diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc index 580094e9be906..c8f4942d98a62 100644 --- a/docs/reference/esql/functions/types/date_format.asciidoc +++ b/docs/reference/esql/functions/types/date_format.asciidoc @@ -6,6 +6,9 @@ |=== dateFormat | date | result date | | keyword +date_nanos | | keyword keyword | date | keyword +keyword | date_nanos | keyword text | date | keyword +text | date_nanos | keyword |=== diff --git a/docs/reference/inference/update-inference.asciidoc b/docs/reference/inference/update-inference.asciidoc index d3a90f5d84e65..441c21629da56 100644 --- a/docs/reference/inference/update-inference.asciidoc +++ b/docs/reference/inference/update-inference.asciidoc @@ -19,9 +19,9 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo [[update-inference-api-request]] ==== {api-request-title} -`POST _inference//_update` +`PUT _inference//_update` -`POST _inference///_update` +`PUT _inference///_update` [discrete] @@ -52,7 +52,7 @@ Click the links to review the service configuration details: * <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * <> (`sparse_embedding`) * <> (`completion`, `text_embedding`) -* <> (`rerank`, `text_embedding`) +* <> (`rerank`, `text_embedding`) * <> (`text_embedding`) * <> (`text_embedding`) * <> (`completion`, `text_embedding`) @@ -81,7 +81,7 @@ The following example shows how to update an API key of an {infer} endpoint call [source,console] ------------------------------------------------------------ -POST _inference/my-inference-endpoint/_update +PUT _inference/my-inference-endpoint/_update { "service_settings": { "api_key": "" diff --git a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc index 82263c98e9112..766886a0b48a2 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc @@ -384,6 +384,7 @@ A collection of model size stats fields. `model_size_bytes`::: (integer) The size of the model in bytes. +This parameter applies only to PyTorch models. `required_native_memory_bytes`::: (integer) diff --git a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc index 4f583319ca383..03777afbd6eea 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc @@ -131,6 +131,7 @@ The free-text description of the trained model. `model_size_bytes`::: (integer) The estimated model size in bytes to keep the trained model in memory. +This parameter applies only to {dfanalytics} trained models. `estimated_operations`::: (integer) diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 9c0f0092214ed..68ecc469c6cbd 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -332,7 +332,7 @@ See <>. See <>. 
// [END] Security redirects -[roles="exclude",id="modules-scripting-stored-scripts"] +[role="exclude",id="modules-scripting-stored-scripts"] === Stored scripts See <> diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 5f9e92c575793..8d3768817e856 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -22,7 +22,7 @@ The following APIs support {ccs}: * experimental:[] <> * experimental:[] <> * experimental:[] <> -* experimental:[] <> +* experimental:[] <> [discrete] === Prerequisites diff --git a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc index c07e92c058991..5d74ca66ee6b3 100644 --- a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc +++ b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc @@ -78,35 +78,31 @@ A shard can become unassigned for several reasons. The following tips outline th most common causes and their solutions. [discrete] -[[fix-cluster-status-reenable-allocation]] -===== Re-enable shard allocation +[[fix-cluster-status-only-one-node]] +===== Single node cluster -You typically disable allocation during a <> or other -cluster maintenance. If you forgot to re-enable allocation afterward, {es} will -be unable to assign shards. To re-enable allocation, reset the -`cluster.routing.allocation.enable` cluster setting. +{es} will never assign a replica to the same node as the primary shard. A single-node cluster will always have yellow status. To change to green, set <> to 0 for all indices. -[source,console] ----- -PUT _cluster/settings -{ - "persistent" : { - "cluster.routing.allocation.enable" : null - } -} ----- - -See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for walkthrough of troubleshooting "no allocations are allowed". +Therefore, if the number of replicas equals or exceeds the number of nodes, some shards won't be allocated. [discrete] [[fix-cluster-status-recover-nodes]] ===== Recover lost nodes Shards often become unassigned when a data node leaves the cluster. This can -occur for several reasons, ranging from connectivity issues to hardware failure. +occur for several reasons: + +* A manual node restart will cause a temporary unhealthy cluster state until the node recovers. + +* When a node becomes overloaded or fails, it can temporarily disrupt the cluster’s health, leading to an unhealthy state. Prolonged garbage collection (GC) pauses, caused by out-of-memory errors or high memory usage during intensive searches, can trigger this state. See <> for more JVM-related issues. + +* Network issues can prevent reliable node communication, causing shards to become out of sync. Check the logs for repeated messages about nodes leaving and rejoining the cluster. + After you resolve the issue and recover the node, it will rejoin the cluster. {es} will then automatically allocate any unassigned shards. +You can monitor this process by <>. The number of unallocated shards should progressively decrease until green status is reached. + To avoid wasting resources on temporary issues, {es} <> by one minute by default. If you've recovered a node and don’t want to wait for the delay period, you can call the <> or add a delete phase. 
If you no longer need to search the data, you @@ -219,11 +216,39 @@ watermark or set it to an explicit byte value. PUT _cluster/settings { "persistent": { - "cluster.routing.allocation.disk.watermark.low": "30gb" + "cluster.routing.allocation.disk.watermark.low": "90%", + "cluster.routing.allocation.disk.watermark.high": "95%" } } ---- -// TEST[s/"30gb"/null/] +// TEST[s/"90%"/null/] +// TEST[s/"95%"/null/] + +[IMPORTANT] +==== +This is usually a temporary solution and may cause instability if disk space is not freed up. +==== + +[discrete] +[[fix-cluster-status-reenable-allocation]] +===== Re-enable shard allocation + +You typically disable allocation during a <> or other +cluster maintenance. If you forgot to re-enable allocation afterward, {es} will +be unable to assign shards. To re-enable allocation, reset the +`cluster.routing.allocation.enable` cluster setting. + +[source,console] +---- +PUT _cluster/settings +{ + "persistent" : { + "cluster.routing.allocation.enable" : null + } +} +---- + +See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for a walkthrough of troubleshooting "no allocations are allowed". [discrete] [[fix-cluster-status-jvm]] @@ -271,4 +296,4 @@ POST _cluster/reroute // TEST[s/^/PUT my-index\n/] // TEST[catch:bad_request] -See https://www.youtube.com/watch?v=6OAg9IyXFO4[this video] for a walkthrough of troubleshooting `no_valid_shard_copy`. \ No newline at end of file +See https://www.youtube.com/watch?v=6OAg9IyXFO4[this video] for a walkthrough of troubleshooting `no_valid_shard_copy`. diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index 69fc57973f68a..1e03c61df98e4 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -20,8 +20,11 @@ import java.net.InetAddress; import java.net.MulticastSocket; import java.net.NetworkInterface; +import java.net.Proxy; import java.net.ProxySelector; import java.net.ResponseCache; +import java.net.ServerSocket; +import java.net.Socket; import java.net.SocketAddress; import java.net.SocketImplFactory; import java.net.URL; @@ -32,7 +35,6 @@ import javax.net.ssl.HostnameVerifier; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSession; import javax.net.ssl.SSLSocketFactory; @SuppressWarnings("unused") // Called from instrumentation code inserted by the Entitlements agent @@ -193,9 +195,6 @@ public interface EntitlementChecker { void check$java_net_URL$(Class callerClass, URL context, String spec, URLStreamHandler handler); - // The only implementation of SSLSession#getSessionContext(); unfortunately it's an interface, so we need to check the implementation - void check$sun_security_ssl_SSLSessionImpl$getSessionContext(Class callerClass, SSLSession sslSession); - void check$java_net_DatagramSocket$bind(Class callerClass, DatagramSocket that, SocketAddress addr); void check$java_net_DatagramSocket$connect(Class callerClass, DatagramSocket that, InetAddress addr); @@ -219,4 +218,40 @@ public interface EntitlementChecker { void check$java_net_MulticastSocket$leaveGroup(Class callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni); void check$java_net_MulticastSocket$send(Class callerClass, MulticastSocket that, DatagramPacket p, byte 
ttl); + + // Binding/connecting ctor + void check$java_net_ServerSocket$(Class callerClass, int port); + + void check$java_net_ServerSocket$(Class callerClass, int port, int backlog); + + void check$java_net_ServerSocket$(Class callerClass, int port, int backlog, InetAddress bindAddr); + + void check$java_net_ServerSocket$accept(Class callerClass, ServerSocket that); + + void check$java_net_ServerSocket$implAccept(Class callerClass, ServerSocket that, Socket s); + + void check$java_net_ServerSocket$bind(Class callerClass, ServerSocket that, SocketAddress endpoint); + + void check$java_net_ServerSocket$bind(Class callerClass, ServerSocket that, SocketAddress endpoint, int backlog); + + // Binding/connecting ctors + void check$java_net_Socket$(Class callerClass, Proxy proxy); + + void check$java_net_Socket$(Class callerClass, String host, int port); + + void check$java_net_Socket$(Class callerClass, InetAddress address, int port); + + void check$java_net_Socket$(Class callerClass, String host, int port, InetAddress localAddr, int localPort); + + void check$java_net_Socket$(Class callerClass, InetAddress address, int port, InetAddress localAddr, int localPort); + + void check$java_net_Socket$(Class callerClass, String host, int port, boolean stream); + + void check$java_net_Socket$(Class callerClass, InetAddress host, int port, boolean stream); + + void check$java_net_Socket$bind(Class callerClass, Socket that, SocketAddress endpoint); + + void check$java_net_Socket$connect(Class callerClass, Socket that, SocketAddress endpoint); + + void check$java_net_Socket$connect(Class callerClass, Socket that, SocketAddress endpoint, int timeout); } diff --git a/libs/entitlement/qa/common/build.gradle b/libs/entitlement/qa/common/build.gradle index df3bc66cba21b..18bc5679d09c9 100644 --- a/libs/entitlement/qa/common/build.gradle +++ b/libs/entitlement/qa/common/build.gradle @@ -7,9 +7,16 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ +import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask + apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.mrjar' dependencies { implementation project(':server') implementation project(':libs:logging') } + +tasks.withType(CheckForbiddenApisTask).configureEach { + replaceSignatureFiles 'jdk-signatures' +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java index fae873123528d..304aead1e2bf6 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java @@ -10,14 +10,18 @@ package org.elasticsearch.entitlement.qa.common; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.DatagramSocketImpl; import java.net.InetAddress; import java.net.NetworkInterface; +import java.net.ServerSocket; import java.net.Socket; import java.net.SocketAddress; import java.net.SocketException; +import java.net.SocketImpl; import java.security.cert.Certificate; import java.text.BreakIterator; import java.text.Collator; @@ -297,6 +301,81 @@ public Certificate[] getServerCertificates() { } } + private static class DummySocketImpl extends SocketImpl { + @Override + protected void create(boolean stream) {} + + @Override + protected void connect(String host, int port) {} + + @Override + protected void connect(InetAddress address, int port) {} + + @Override + protected void connect(SocketAddress address, int timeout) {} + + @Override + protected void bind(InetAddress host, int port) {} + + @Override + protected void listen(int backlog) {} + + @Override + protected void accept(SocketImpl s) {} + + @Override + protected InputStream getInputStream() { + return null; + } + + @Override + protected OutputStream getOutputStream() { + return null; + } + + @Override + protected int available() { + return 0; + } + + @Override + protected void close() {} + + @Override + protected void sendUrgentData(int data) {} + + @Override + public void setOption(int optID, Object value) {} + + @Override + public Object getOption(int optID) { + return null; + } + } + + static class DummySocket extends Socket { + DummySocket() throws SocketException { + super(new DummySocketImpl()); + } + } + + static class DummyServerSocket extends ServerSocket { + DummyServerSocket() { + super(new DummySocketImpl()); + } + } + + static class DummyBoundServerSocket extends ServerSocket { + DummyBoundServerSocket() { + super(new DummySocketImpl()); + } + + @Override + public boolean isBound() { + return true; + } + } + static class DummySSLSocketFactory extends SSLSocketFactory { @Override public Socket createSocket(String host, int port) { diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java new file mode 100644 index 0000000000000..c88d4ce2b11a9 --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa.common; + +import org.elasticsearch.core.SuppressForbidden; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.ServerSocket; +import java.net.Socket; + +class NetworkAccessCheckActions { + + static void serverSocketAccept() throws IOException { + try (ServerSocket socket = new DummyImplementations.DummyBoundServerSocket()) { + try { + socket.accept(); + } catch (IOException e) { + // Our dummy socket cannot accept connections unless we tell the JDK how to create a socket for it. + // But Socket.setSocketImplFactory(); is one of the methods we always forbid, so we cannot use it. + // Still, we can check accept is called (allowed/denied), we don't care if it fails later for this + // known reason. + assert e.getMessage().contains("client socket implementation factory not set"); + } + } + } + + static void serverSocketBind() throws IOException { + try (ServerSocket socket = new DummyImplementations.DummyServerSocket()) { + socket.bind(null); + } + } + + @SuppressForbidden(reason = "Testing entitlement check on forbidden action") + static void createSocketWithProxy() throws IOException { + try (Socket socket = new Socket(new Proxy(Proxy.Type.HTTP, new InetSocketAddress(0)))) { + assert socket.isBound() == false; + } + } + + static void socketBind() throws IOException { + try (Socket socket = new DummyImplementations.DummySocket()) { + socket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); + } + } + + @SuppressForbidden(reason = "Testing entitlement check on forbidden action") + static void socketConnect() throws IOException { + try (Socket socket = new DummyImplementations.DummySocket()) { + socket.connect(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); + } + } +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 3a5480f468528..9e7e6e33f3eda 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -49,20 +49,16 @@ import java.net.URLClassLoader; import java.net.URLConnection; import java.net.URLStreamHandler; -import java.net.spi.InetAddressResolver; -import java.net.spi.InetAddressResolverProvider; import java.net.spi.URLStreamHandlerProvider; import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSession; -import javax.net.ssl.SSLSocket; -import javax.net.ssl.SSLSocketFactory; import static java.util.Map.entry; import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.alwaysDenied; @@ -76,25 
+72,25 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing"); private final String prefix; - record CheckAction(CheckedRunnable action, boolean isAlwaysDeniedToPlugins) { + record CheckAction(CheckedRunnable action, boolean isAlwaysDeniedToPlugins, Integer fromJavaVersion) { /** * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case. - * Used both for always-denied entitlements as well as those granted only to the server itself. + * Used both for always-denied entitlements and those granted only to the server itself. */ static CheckAction deniedToPlugins(CheckedRunnable action) { - return new CheckAction(action, true); + return new CheckAction(action, true, null); } static CheckAction forPlugins(CheckedRunnable action) { - return new CheckAction(action, false); + return new CheckAction(action, false, null); } static CheckAction alwaysDenied(CheckedRunnable action) { - return new CheckAction(action, true); + return new CheckAction(action, true, null); } } - private static final Map checkActions = Map.ofEntries( + private static final Map checkActions = Stream.of( entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)), @@ -143,18 +139,28 @@ static CheckAction alwaysDenied(CheckedRunnable action) { entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)), entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)), - entry("createInetAddressResolverProvider", alwaysDenied(RestEntitlementsCheckAction::createInetAddressResolverProvider)), + entry( + "createInetAddressResolverProvider", + new CheckAction(VersionSpecificNetworkChecks::createInetAddressResolverProvider, true, 18) + ), entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)), entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)), entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)), - entry("sslSessionImpl_getSessionContext", alwaysDenied(RestEntitlementsCheckAction::sslSessionImplGetSessionContext)), entry("datagram_socket_bind", forPlugins(RestEntitlementsCheckAction::bindDatagramSocket)), entry("datagram_socket_connect", forPlugins(RestEntitlementsCheckAction::connectDatagramSocket)), entry("datagram_socket_send", forPlugins(RestEntitlementsCheckAction::sendDatagramSocket)), entry("datagram_socket_receive", forPlugins(RestEntitlementsCheckAction::receiveDatagramSocket)), entry("datagram_socket_join_group", forPlugins(RestEntitlementsCheckAction::joinGroupDatagramSocket)), - entry("datagram_socket_leave_group", forPlugins(RestEntitlementsCheckAction::leaveGroupDatagramSocket)) - ); + entry("datagram_socket_leave_group", forPlugins(RestEntitlementsCheckAction::leaveGroupDatagramSocket)), + + entry("create_socket_with_proxy", forPlugins(NetworkAccessCheckActions::createSocketWithProxy)), + entry("socket_bind", forPlugins(NetworkAccessCheckActions::socketBind)), + entry("socket_connect", forPlugins(NetworkAccessCheckActions::socketConnect)), + entry("server_socket_bind", forPlugins(NetworkAccessCheckActions::serverSocketBind)), + 
entry("server_socket_accept", forPlugins(NetworkAccessCheckActions::serverSocketAccept)) + ) + .filter(entry -> entry.getValue().fromJavaVersion() == null || Runtime.version().feature() >= entry.getValue().fromJavaVersion()) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); private static void createURLStreamHandlerProvider() { var x = new URLStreamHandlerProvider() { @@ -165,15 +171,6 @@ public URLStreamHandler createURLStreamHandler(String protocol) { }; } - private static void sslSessionImplGetSessionContext() throws IOException { - SSLSocketFactory factory = HttpsURLConnection.getDefaultSSLSocketFactory(); - try (SSLSocket socket = (SSLSocket) factory.createSocket()) { - SSLSession session = socket.getSession(); - - session.getSessionContext(); - } - } - @SuppressWarnings("deprecation") private static void createURLWithURLStreamHandler() throws MalformedURLException { var x = new URL("http", "host", 1234, "file", new URLStreamHandler() { @@ -194,20 +191,6 @@ protected URLConnection openConnection(URL u) { }); } - private static void createInetAddressResolverProvider() { - var x = new InetAddressResolverProvider() { - @Override - public InetAddressResolver get(Configuration configuration) { - return null; - } - - @Override - public String name() { - return "TEST"; - } - }; - } - private static void setDefaultResponseCache() { ResponseCache.setDefault(null); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java similarity index 56% rename from server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java rename to libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java index 49bd38330e3af..e1e0b9e52f510 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java @@ -7,16 +7,8 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.cluster.metadata; +package org.elasticsearch.entitlement.qa.common; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class MetadataFeatures implements FeatureSpecification { - @Override - public Set getFeatures() { - return Set.of(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED); - } +class VersionSpecificNetworkChecks { + static void createInetAddressResolverProvider() {} } diff --git a/libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java b/libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java new file mode 100644 index 0000000000000..0ead32ec480ee --- /dev/null +++ b/libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa.common; + +import java.net.spi.InetAddressResolver; +import java.net.spi.InetAddressResolverProvider; + +class VersionSpecificNetworkChecks { + static void createInetAddressResolverProvider() { + var x = new InetAddressResolverProvider() { + @Override + public InetAddressResolver get(Configuration configuration) { + return null; + } + + @Override + public String name() { + return "TEST"; + } + }; + } +} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index ba5ccbafa70ae..9b621461403d1 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -22,6 +22,7 @@ import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement; import org.elasticsearch.entitlement.runtime.policy.Entitlement; import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement; +import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement; import org.elasticsearch.entitlement.runtime.policy.Policy; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; import org.elasticsearch.entitlement.runtime.policy.PolicyParser; @@ -44,6 +45,9 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.ACCEPT_ACTION; +import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.CONNECT_ACTION; +import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.LISTEN_ACTION; import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED; /** @@ -97,7 +101,15 @@ private static PolicyManager createPolicyManager() throws IOException { List.of( new Scope("org.elasticsearch.base", List.of(new CreateClassLoaderEntitlement())), new Scope("org.elasticsearch.xcontent", List.of(new CreateClassLoaderEntitlement())), - new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())) + new Scope( + "org.elasticsearch.server", + List.of( + new ExitVMEntitlement(), + new CreateClassLoaderEntitlement(), + new NetworkEntitlement(LISTEN_ACTION | CONNECT_ACTION | ACCEPT_ACTION) + ) + ), + new Scope("org.apache.httpcomponents.httpclient", List.of(new NetworkEntitlement(CONNECT_ACTION))) ) ); // agents run without a module, so this is a special hack for the apm agent diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index dd39ec3c5fe43..695d1c574c7c3 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -24,8 +24,11 @@ import 
java.net.InetAddress; import java.net.MulticastSocket; import java.net.NetworkInterface; +import java.net.Proxy; import java.net.ProxySelector; import java.net.ResponseCache; +import java.net.ServerSocket; +import java.net.Socket; import java.net.SocketAddress; import java.net.SocketImplFactory; import java.net.URL; @@ -36,7 +39,6 @@ import javax.net.ssl.HostnameVerifier; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSession; import javax.net.ssl.SSLSocketFactory; /** @@ -352,11 +354,6 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { policyManager.checkChangeNetworkHandling(callerClass); } - @Override - public void check$sun_security_ssl_SSLSessionImpl$getSessionContext(Class callerClass, SSLSession sslSession) { - policyManager.checkReadSensitiveNetworkInformation(callerClass); - } - @Override public void check$java_net_DatagramSocket$bind(Class callerClass, DatagramSocket that, SocketAddress addr) { policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); @@ -420,4 +417,91 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { public void check$java_net_MulticastSocket$send(Class callerClass, MulticastSocket that, DatagramPacket p, byte ttl) { policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); } + + @Override + public void check$java_net_ServerSocket$(Class callerClass, int port) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_ServerSocket$(Class callerClass, int port, int backlog) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_ServerSocket$(Class callerClass, int port, int backlog, InetAddress bindAddr) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_ServerSocket$accept(Class callerClass, ServerSocket that) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_ServerSocket$implAccept(Class callerClass, ServerSocket that, Socket s) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_ServerSocket$bind(Class callerClass, ServerSocket that, SocketAddress endpoint) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_ServerSocket$bind(Class callerClass, ServerSocket that, SocketAddress endpoint, int backlog) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_Socket$(Class callerClass, Proxy proxy) { + if (proxy.type() == Proxy.Type.SOCKS || proxy.type() == Proxy.Type.HTTP) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION); + } + } + + @Override + public void check$java_net_Socket$(Class callerClass, String host, int port) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void check$java_net_Socket$(Class callerClass, InetAddress address, int port) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void 
check$java_net_Socket$(Class callerClass, String host, int port, InetAddress localAddr, int localPort) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void check$java_net_Socket$(Class callerClass, InetAddress address, int port, InetAddress localAddr, int localPort) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void check$java_net_Socket$(Class callerClass, String host, int port, boolean stream) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void check$java_net_Socket$(Class callerClass, InetAddress host, int port, boolean stream) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void check$java_net_Socket$bind(Class callerClass, Socket that, SocketAddress endpoint) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_Socket$connect(Class callerClass, Socket that, SocketAddress endpoint) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION); + } + + @Override + public void check$java_net_Socket$connect(Class callerClass, Socket that, SocketAddress endpoint, int timeout) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION); + } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java index b6c6a41d5be7f..9b4035cee98d0 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java @@ -58,7 +58,11 @@ public NetworkEntitlement(List actionsList) { this.actions = actionsInt; } - public static Object printActions(int actions) { + public NetworkEntitlement(int actions) { + this.actions = actions; + } + + public static String printActions(int actions) { var joiner = new StringJoiner(","); for (var entry : ACTION_MAP.entrySet()) { var action = entry.getValue(); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index f039fbda3dfbd..aeb54d5c1156c 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -200,21 +200,21 @@ public void checkNetworkAccess(Class callerClass, int actions) { return; } - ModuleEntitlements entitlements = getEntitlements(requestingClass); + ModuleEntitlements entitlements = getEntitlements(requestingClass, NetworkEntitlement.class); if (entitlements.getEntitlements(NetworkEntitlement.class).anyMatch(n -> n.matchActions(actions))) { logger.debug( () -> Strings.format( - "Entitled: class [%s], module [%s], entitlement [Network], actions [Ox%X]", + "Entitled: class [%s], module [%s], entitlement [network], actions [%s]", requestingClass, requestingClass.getModule().getName(), - actions + NetworkEntitlement.printActions(actions) ) );
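// Editorial sketch: `actions` is a bitmask, so a single instrumented call can demand several
// capabilities at once (e.g. LISTEN_ACTION | CONNECT_ACTION for the Socket constructors above).
// Assuming matchActions keeps the usual mask semantics, the rule it applies would be:
//     boolean matchActions(int requested) {
//         return (this.actions & requested) == requested; // every requested bit must be granted
//     }
// printActions(int) renders the bits back into a readable "listen,connect,accept" string for the
// log messages on either side of this note.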
return; } throw new NotEntitledException( Strings.format( - "Missing entitlement: class [%s], module [%s], entitlement [Network], actions [%s]", + "Missing entitlement: class [%s], module [%s], entitlement [network], actions [%s]", requestingClass, requestingClass.getModule().getName(), NetworkEntitlement.printActions(actions) @@ -228,14 +228,14 @@ private void checkEntitlementPresent(Class callerClass, Class Strings.format( "Entitled: class [%s], module [%s], entitlement [%s]", requestingClass, requestingClass.getModule().getName(), - entitlementClass.getSimpleName() + PolicyParser.getEntitlementTypeName(entitlementClass) ) ); return; @@ -245,19 +245,22 @@ private void checkEntitlementPresent(Class callerClass, Class requestingClass) { - return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass)); + ModuleEntitlements getEntitlements(Class requestingClass, Class entitlementClass) { + return moduleEntitlementsMap.computeIfAbsent( + requestingClass.getModule(), + m -> computeEntitlements(requestingClass, entitlementClass) + ); } - private ModuleEntitlements computeEntitlements(Class requestingClass) { + private ModuleEntitlements computeEntitlements(Class requestingClass, Class entitlementClass) { Module requestingModule = requestingClass.getModule(); if (isServerModule(requestingModule)) { - return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName()); + return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName(), "server", entitlementClass); } // plugins @@ -271,7 +274,7 @@ private ModuleEntitlements computeEntitlements(Class requestingClass) { } else { scopeName = requestingModule.getName(); } - return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName, pluginName, entitlementClass); } } @@ -287,11 +290,19 @@ private ModuleEntitlements computeEntitlements(Class requestingClass) { private ModuleEntitlements getModuleScopeEntitlements( Class callerClass, Map> scopeEntitlements, - String moduleName + String moduleName, + String component, + Class entitlementClass ) { var entitlements = scopeEntitlements.get(moduleName); if (entitlements == null) { - logger.warn("No applicable entitlement policy for module [{}], class [{}]", moduleName, callerClass); + logger.warn( + "No applicable entitlement policy for entitlement [{}] in [{}], module [{}], class [{}]", + PolicyParser.getEntitlementTypeName(entitlementClass), + component, + moduleName, + callerClass + ); return ModuleEntitlements.NONE; } return ModuleEntitlements.from(entitlements); diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java index d22c2f598e344..092813be75cc8 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -38,7 +38,7 @@ public class PolicyManagerTests extends ESTestCase { /** * A module you can use for test cases that don't actually care about the - * entitlements module. + * entitlement module. 
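 * (Such tests now pass {@code Entitlement.class} as the queried entitlement type, which is why
 * the assertions below call {@code policyManager.getEntitlements(callerClass, Entitlement.class)}.)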
*/ private static Module NO_ENTITLEMENTS_MODULE; @@ -66,7 +66,11 @@ public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - assertEquals("No policy for the unnamed module", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals( + "No policy for the unnamed module", + ModuleEntitlements.NONE, + policyManager.getEntitlements(callerClass, Entitlement.class) + ); assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } @@ -78,7 +82,7 @@ public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class)); assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } @@ -90,11 +94,11 @@ public void testGetEntitlementsFailureIsCached() { var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class)); assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); // A second time - assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class)); // Nothing new in the map assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); @@ -112,7 +116,7 @@ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var entitlements = policyManager.getEntitlements(callerClass); + var entitlements = policyManager.getEntitlements(callerClass, Entitlement.class); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); } @@ -126,7 +130,11 @@ public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotF var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - assertEquals("No policy for this module in server", ModuleEntitlements.NONE, policyManager.getEntitlements(mockServerClass)); + assertEquals( + "No policy for this module in server", + ModuleEntitlements.NONE, + policyManager.getEntitlements(mockServerClass, Entitlement.class) + ); assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } @@ -145,9 +153,8 @@ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws Class // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is // loaded too early) to mimic a class that would be in the server module. 
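// (A hedged sketch of why the trick works, assuming PolicyManager#isServerModule treats any
// named module in the boot layer as server code, i.e. requestingModule.isNamed()
// && requestingModule.getLayer() == ModuleLayer.boot(): jdk.httpserver satisfies both, while
// this test class lives in an unnamed module and does not.)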
var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); - var requestingModule = mockServerClass.getModule(); - var entitlements = policyManager.getEntitlements(mockServerClass); + var entitlements = policyManager.getEntitlements(mockServerClass, Entitlement.class); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true)); } @@ -167,9 +174,8 @@ public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOExc var layer = createLayerForJar(jar, "org.example.plugin"); var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B"); - var requestingModule = mockPluginClass.getModule(); - var entitlements = policyManager.getEntitlements(mockPluginClass); + var entitlements = policyManager.getEntitlements(mockPluginClass, Entitlement.class); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat( entitlements.getEntitlements(FileEntitlement.class).toList(), @@ -189,11 +195,11 @@ public void testGetEntitlementsResultIsCached() { // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var entitlements = policyManager.getEntitlements(callerClass); + var entitlements = policyManager.getEntitlements(callerClass, Entitlement.class); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); - var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get(); - var entitlementsAgain = policyManager.getEntitlements(callerClass); + var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().orElseThrow(); + var entitlementsAgain = policyManager.getEntitlements(callerClass, Entitlement.class); // Nothing new in the map assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java index 43447cfa21a62..339a4ec24ca13 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java @@ -92,7 +92,14 @@ public List> getSettings() { APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING, APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING, APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING, - APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES + APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES, + // The settings below are deprecated and are currently kept as fallback. 
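// Registering them keeps the node recognizing the old tracing.apm.* keys, so the telemetry.*
// settings declared with these as fallbacks can still resolve legacy values; reading a legacy
// key emits a deprecation warning, as the tests further down assert.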
+ APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING, + APMAgentSettings.TRACING_APM_API_KEY_SETTING, + APMAgentSettings.TRACING_APM_ENABLED_SETTING, + APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING, + APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING, + APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES ); } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java index 8647761e2defe..f66683a787bc0 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java @@ -25,7 +25,9 @@ import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.function.Function; +import static org.elasticsearch.common.settings.Setting.Property.Deprecated; import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic; @@ -99,6 +101,9 @@ public void setAgentSetting(String key, String value) { private static final String TELEMETRY_SETTING_PREFIX = "telemetry."; + // The old legacy prefix + private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm."; + /** * Allow-list of APM agent config keys users are permitted to configure. * @see APM Java Agent Configuration @@ -243,24 +248,56 @@ private static Setting concreteAgentSetting(String namespace, String qua public static final Setting.AffixSetting APM_AGENT_SETTINGS = Setting.prefixKeySetting( TELEMETRY_SETTING_PREFIX + "agent.", - null, // no fallback - (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) + LEGACY_TRACING_APM_SETTING_PREFIX + "agent.", + (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX) + ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated) + : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) ); - public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting( + /** + * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING. + */ + @Deprecated + public static final Setting> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "names.include", + OperatorDynamic, + NodeScope, + Deprecated + ); + + public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting( TELEMETRY_SETTING_PREFIX + "tracing.names.include", + TRACING_APM_NAMES_INCLUDE_SETTING, + Function.identity(), OperatorDynamic, NodeScope ); - public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( + /** + * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING. 
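 * (The replacement is declared through {@code Setting.listSetting(key, fallback, Function.identity(), ...)},
 * so reading the new telemetry.tracing.names.exclude key transparently picks up a value that was
 * set under this legacy key.)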
+ */ + @Deprecated + public static final Setting> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude", + OperatorDynamic, + NodeScope, + Deprecated + ); + + public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting( TELEMETRY_SETTING_PREFIX + "tracing.names.exclude", + TRACING_APM_NAMES_EXCLUDE_SETTING, + Function.identity(), OperatorDynamic, NodeScope ); - public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting( - TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names", + /** + * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES. + */ + @Deprecated + public static final Setting> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names", List.of( "password", "passwd", @@ -276,12 +313,33 @@ private static Setting concreteAgentSetting(String namespace, String qua "set-cookie" ), OperatorDynamic, + NodeScope, + Deprecated + ); + + public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting( + TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names", + TRACING_APM_SANITIZE_FIELD_NAMES, + Function.identity(), + OperatorDynamic, NodeScope ); + /** + * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING. + */ + @Deprecated + public static final Setting TRACING_APM_ENABLED_SETTING = Setting.boolSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "enabled", + false, + OperatorDynamic, + NodeScope, + Deprecated + ); + public static final Setting TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting( TELEMETRY_SETTING_PREFIX + "tracing.enabled", - false, + TRACING_APM_ENABLED_SETTING, OperatorDynamic, NodeScope ); @@ -293,13 +351,33 @@ private static Setting concreteAgentSetting(String namespace, String qua NodeScope ); + /** + * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING. + */ + @Deprecated + public static final Setting TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString( + LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token", + null, + Deprecated + ); + public static final Setting TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString( TELEMETRY_SETTING_PREFIX + "secret_token", - null + TRACING_APM_SECRET_TOKEN_SETTING + ); + + /** + * @deprecated in favor of TELEMETRY_API_KEY_SETTING. 
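 * (Secure settings follow the same pattern: {@code SecureSetting.secureString(key, fallback)}
 * reads the keystore entry registered under this legacy key when no telemetry.api_key entry
 * is present.)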
+ */ + @Deprecated + public static final Setting TRACING_APM_API_KEY_SETTING = SecureSetting.secureString( + LEGACY_TRACING_APM_SETTING_PREFIX + "api_key", + null, + Deprecated ); public static final Setting TELEMETRY_API_KEY_SETTING = SecureSetting.secureString( TELEMETRY_SETTING_PREFIX + "api_key", - null + TRACING_APM_API_KEY_SETTING ); } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java index 5516672420924..a60048c82a3c9 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.mockito.Mockito; @@ -19,13 +21,21 @@ import java.util.Set; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasItem; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; @@ -60,6 +70,14 @@ public void testEnableTracing() { } } + public void testEnableTracingUsingLegacySetting() { + Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build(); + apmAgentSettings.initAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "true"); + assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + public void testEnableMetrics() { for (boolean tracingEnabled : List.of(true, false)) { 
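// (The loop exercises metrics enablement with tracing both on and off, checking that the two
// telemetry toggles stay independent of each other.)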
clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService()); @@ -103,6 +121,14 @@ public void testDisableTracing() { } } + public void testDisableTracingUsingLegacySetting() { + Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build(); + apmAgentSettings.initAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "false"); + assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + public void testDisableMetrics() { for (boolean tracingEnabled : List.of(true, false)) { clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService()); @@ -155,18 +181,70 @@ public void testSetAgentSettings() { verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); } + public void testSetAgentsSettingsWithLegacyPrefix() { + Settings settings = Settings.builder() + .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true) + .put("tracing.apm.agent.span_compression_enabled", "true") + .build(); + apmAgentSettings.initAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "true"); + verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); + assertWarnings( + "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release." + ); + } + /** * Check that invalid or forbidden APM agent settings are rejected. */ public void testRejectForbiddenOrUnknownAgentSettings() { - String prefix = APM_AGENT_SETTINGS.getKey(); - Settings settings = Settings.builder().put(prefix + "unknown", "true").build(); - Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings)); - assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]")); - + List prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent."); + for (String prefix : prefixes) { + Settings settings = Settings.builder().put(prefix + "unknown", "true").build(); + Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings)); + assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]")); + } // though, accept / ignore nested global_labels - var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build()); - assertThat(map, hasEntry("global_labels.abc", "123")); + for (String prefix : prefixes) { + Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build(); + APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings); + + if (prefix.startsWith("tracing.apm.agent.")) { + assertWarnings( + "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release." 
+ ); + } + } + } + + public void testTelemetryTracingNamesIncludeFallback() { + Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build(); + + List included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings); + + assertThat(included, containsInAnyOrder("abc", "xyz")); + assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + + public void testTelemetryTracingNamesExcludeFallback() { + Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build(); + + List included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings); + + assertThat(included, containsInAnyOrder("abc", "xyz")); + assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + + public void testTelemetryTracingSanitizeFieldNamesFallback() { + Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build(); + + List included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings); + + assertThat(included, containsInAnyOrder("abc", "xyz")); + assertWarnings( + "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release." + ); } public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() { @@ -174,6 +252,28 @@ public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() { assertThat(included, hasItem("password")); // and more defaults } + public void testTelemetrySecretTokenFallback() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret"); + Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + + try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) { + assertEquals("verysecret", secureString.toString()); + } + assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + + public void testTelemetryApiKeyFallback() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc"); + Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + + try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) { + assertEquals("abc", secureString.toString()); + } + assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + /** * Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting. 
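 * (e.g. a hypothetical key whose final segment happens to match an allow-listed agent setting
 * must still be rejected when the full key itself is not on the allow-list.)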
*/ diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 2739eb51376ea..839ac9c7653e4 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -182,7 +182,8 @@ public void testBasicScenario() throws Exception { String backingIndex = barDataStream.getIndices().get(0).getName(); backingIndices.add(backingIndex); - GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)) + .actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); Map mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); @@ -190,7 +191,7 @@ public void testBasicScenario() throws Exception { backingIndex = fooDataStream.getIndices().get(0).getName(); backingIndices.add(backingIndex); - getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); @@ -214,7 +215,7 @@ public void testBasicScenario() throws Exception { backingIndex = fooRolloverResponse.getNewIndex(); backingIndices.add(backingIndex); - getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); @@ -222,7 +223,7 @@ public void testBasicScenario() throws Exception { backingIndex = barRolloverResponse.getNewIndex(); backingIndices.add(backingIndex); - getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); @@ -245,7 +246,7 @@ public void testBasicScenario() throws Exception { expectThrows( IndexNotFoundException.class, "Backing index '" + index + "' should have been deleted.", - () -> indicesAdmin().getIndex(new GetIndexRequest().indices(index)).actionGet() + () -> indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)).actionGet() ); } } @@ -479,7 +480,8 @@ public void 
testComposableTemplateOnlyMatchingWithDataStreamName() throws Except String backingIndex = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1)); - GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(dataStreamName)).actionGet(); + GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(dataStreamName)) + .actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); assertThat( @@ -492,7 +494,7 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 2)); assertTrue(rolloverResponse.isRolledOver()); - getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); assertThat( @@ -518,7 +520,7 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except expectThrows( IndexNotFoundException.class, "Backing index '" + index.getName() + "' should have been deleted.", - () -> indicesAdmin().getIndex(new GetIndexRequest().indices(index.getName())).actionGet() + () -> indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index.getName())).actionGet() ); } } @@ -606,7 +608,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, dataStreamName), false); verifyResolvability(dataStreamName, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(dataStreamName), false); verifyResolvability(dataStreamName, client().prepareFieldCaps(dataStreamName).setFields("*"), false); - verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex().addIndices(dataStreamName), false); + verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareOpen(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true); verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true); @@ -653,7 +655,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, wildcardExpression), false); verifyResolvability(wildcardExpression, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, client().prepareFieldCaps(wildcardExpression).setFields("*"), false); - verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex().addIndices(wildcardExpression), false); + verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false); verifyResolvability(wildcardExpression, 
indicesAdmin().prepareClose(wildcardExpression), false); verifyResolvability( @@ -1401,7 +1403,8 @@ public void testGetDataStream() throws Exception { } private static void assertBackingIndex(String backingIndex, String timestampFieldPathInMapping, Map expectedMapping) { - GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)) + .actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); Map mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); @@ -1488,7 +1491,8 @@ public void testMixedAutoCreate() throws Exception { assertThat(getDataStreamsResponse.getDataStreams().get(2).getDataStream().getName(), equalTo("logs-foobaz2")); assertThat(getDataStreamsResponse.getDataStreams().get(3).getDataStream().getName(), equalTo("logs-foobaz3")); - GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices("logs-bar*")).actionGet(); + GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("logs-bar*")) + .actionGet(); assertThat(getIndexResponse.getIndices(), arrayWithSize(4)); assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barbaz")); assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo")); @@ -1521,7 +1525,8 @@ public void testAutoCreateV1TemplateNoDataStream() { .actionGet(); assertThat(getDataStreamsResponse.getDataStreams(), hasSize(0)); - GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices("logs-foobar")).actionGet(); + GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("logs-foobar")) + .actionGet(); assertThat(getIndexResponse.getIndices(), arrayWithSize(1)); assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-foobar")); assertThat(getIndexResponse.getSettings().get("logs-foobar").get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS), equalTo("0")); @@ -1657,7 +1662,7 @@ public void testMultiThreadedRollover() throws Exception { .actionGet(); String newBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getWriteIndex().getName(); assertThat(newBackingIndexName, backingIndexEqualTo("potato-biscuit", 2)); - indicesAdmin().prepareGetIndex().addIndices(newBackingIndexName).get(); + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(newBackingIndexName).get(); } catch (Exception e) { logger.info("--> expecting second index to be created but it has not yet been created"); fail("expecting second index to exist"); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index f6c703b96888c..40bde501f0bfd 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -1304,7 +1304,7 @@ public void testRestoreSnapshotFully() throws Exception { assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); assertThat(getDataStreamInfo("*"), hasSize(3)); - 
assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get()); + assertNotNull(client.admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(indexName).get()); } public void testRestoreDataStreamAliasWithConflictingDataStream() throws Exception { diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java index c08a3548127ec..c02c7ea25b122 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java @@ -50,7 +50,7 @@ public void testDefaultDataStreamAllocateToHot() { .setWaitForActiveShards(0) .get() .getIndex(); - var idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(dsIndexName); + var idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(dsIndexName); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT)); logger.info("--> waiting for {} to be yellow", index); @@ -62,7 +62,7 @@ public void testDefaultDataStreamAllocateToHot() { // new index name should have the rolled over name assertNotEquals(dsIndexName, rolledOverIndexName); - idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(rolledOverIndexName); + idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(rolledOverIndexName); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT)); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java index aa6ecf35e06fa..0bba93ee6ec36 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java @@ -341,7 +341,10 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept DataStream fooDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); String backingIndex = fooDataStream.getIndices().get(0).getName(); backingIndices.add(backingIndex); - GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + GetIndexResponse getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)) + .actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); Map mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); @@ -377,7 +380,10 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept DataStream barDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); String backingIndex = barDataStream.getIndices().get(0).getName(); backingIndices.add(backingIndex); - GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + 
GetIndexResponse getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)) + .actionGet(); assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); Map mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java index 2083807b1227f..855644a09e0e0 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java @@ -104,7 +104,7 @@ public void testSystemDataStreamInGlobalState() throws Exception { } { - GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex().addIndices("_all").get(); + GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("_all").get(); assertThat(indicesRemaining.indices(), arrayWithSize(0)); assertSystemDataStreamDoesNotExist(); } @@ -236,7 +236,7 @@ public void testSystemDataStreamInFeatureState() throws Exception { assertAcked(indicesAdmin().prepareDelete("my-index")); { - GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex().addIndices("_all").get(); + GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("_all").get(); assertThat(indicesRemaining.indices(), arrayWithSize(0)); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index aad68660d2e4d..434a8bced8895 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -155,7 +155,7 @@ public void testTimeRanges() throws Exception { } // fetch end time - var getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndexName)).actionGet(); + var getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndexName)).actionGet(); Instant endTime = IndexSettings.TIME_SERIES_END_TIME.get(getIndexResponse.getSettings().get(backingIndexName)); // index another doc and verify index @@ -194,7 +194,7 @@ public void testTimeRanges() throws Exception { var newBackingIndexName = rolloverResponse.getNewIndex(); // index and check target index is new - getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(newBackingIndexName)).actionGet(); + getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(newBackingIndexName)).actionGet(); Instant newStartTime = IndexSettings.TIME_SERIES_START_TIME.get(getIndexResponse.getSettings().get(newBackingIndexName)); Instant newEndTime = IndexSettings.TIME_SERIES_END_TIME.get(getIndexResponse.getSettings().get(newBackingIndexName)); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java index 17e9cca07a054..a76dac5db4540 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java @@ -183,7 +183,7 @@ public void testIndexingGettingAndSearching() throws Exception { } // validate index: - var getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices(index)).actionGet(); + var getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)).actionGet(); assertThat(getIndexResponse.getSettings().get(index).get("index.routing_path"), equalTo("[attributes.*]")); // validate mapping var mapping = getIndexResponse.mappings().get(index).getSourceAsMap(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java index f090186480b76..8026ec641d040 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java @@ -9,10 +9,6 @@ package org.elasticsearch.datastreams; -import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction; -import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; @@ -27,12 +23,7 @@ public class DataStreamFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of( - DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12 - LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13 - DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE, - DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14 - ); + return Set.of(); } @Override diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index cb7445705537a..7d5f4bbee32be 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -197,8 +197,7 @@ public Collection createComponents(PluginServices services) { settings, services.client(), services.clusterService(), - errorStoreInitialisationService.get(), - services.featureService() + errorStoreInitialisationService.get() ) ); dataLifecycleInitialisationService.set( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java index 642fa4923e074..71575ee88aa7d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java @@ -19,8 +19,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; import org.elasticsearch.health.node.DslErrorInfo; import org.elasticsearch.health.node.UpdateHealthInfoCacheAction; @@ -45,12 +43,10 @@ public class DataStreamLifecycleHealthInfoPublisher { Setting.Property.Dynamic, Setting.Property.NodeScope ); - public static final NodeFeature DSL_HEALTH_INFO_FEATURE = new NodeFeature("health.dsl.info", true); private final Client client; private final ClusterService clusterService; private final DataStreamLifecycleErrorStore errorStore; - private final FeatureService featureService; private volatile int signallingErrorRetryInterval; private volatile int maxNumberOfErrorsToPublish; @@ -58,13 +54,11 @@ public DataStreamLifecycleHealthInfoPublisher( Settings settings, Client client, ClusterService clusterService, - DataStreamLifecycleErrorStore errorStore, - FeatureService featureService + DataStreamLifecycleErrorStore errorStore ) { this.client = client; this.clusterService = clusterService; this.errorStore = errorStore; - this.featureService = featureService; this.signallingErrorRetryInterval = DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING.get(settings); this.maxNumberOfErrorsToPublish = DATA_STREAM_LIFECYCLE_MAX_ERRORS_TO_PUBLISH_SETTING.get(settings); } @@ -89,9 +83,6 @@ private void updateNumberOfErrorsToPublish(int newValue) { * {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService#DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING} */ public void publishDslErrorEntries(ActionListener actionListener) { - if (featureService.clusterHasFeature(clusterService.state(), DSL_HEALTH_INFO_FEATURE) == false) { - return; - } // fetching the entries that persist in the error store for more than the signalling retry interval // note that we're reporting this view into the error store on every publishing iteration List errorEntriesToSignal = errorStore.getErrorsInfo( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index ac7dabd868a3f..0bb990e544892 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -67,9 +67,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.datastreams.DataStreamFeatures; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -183,13 +181,7 @@ public void setupServices() { () -> now, errorStore, allocationService, - new DataStreamLifecycleHealthInfoPublisher( - Settings.EMPTY, - client, - clusterService, - errorStore, - new 
FeatureService(List.of(new DataStreamFeatures())) - ), + new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore), globalRetentionSettings ); clientDelegate = null; @@ -1465,13 +1457,7 @@ public void testTrackingTimeStats() { () -> now.getAndAdd(delta), errorStore, mock(AllocationService.class), - new DataStreamLifecycleHealthInfoPublisher( - Settings.EMPTY, - getTransportRequestsRecordingClient(), - clusterService, - errorStore, - new FeatureService(List.of(new DataStreamFeatures())) - ), + new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, getTransportRequestsRecordingClient(), clusterService, errorStore), globalRetentionSettings ); assertThat(service.getLastRunDuration(), is(nullValue())); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java index cff6127e0729e..f8a2ac3c61029 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java @@ -24,10 +24,8 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.DataStreamFeatures; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; import org.elasticsearch.health.node.DslErrorInfo; import org.elasticsearch.health.node.UpdateHealthInfoCacheAction; @@ -40,7 +38,6 @@ import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; @@ -83,13 +80,7 @@ public void setupServices() { final Client client = getTransportRequestsRecordingClient(); errorStore = new DataStreamLifecycleErrorStore(() -> now); - dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher( - Settings.EMPTY, - client, - clusterService, - errorStore, - new FeatureService(List.of(new DataStreamFeatures())) - ); + dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore); } @After @@ -105,16 +96,6 @@ public void testPublishDslErrorEntries() { } errorStore.recordError("testIndex", new IllegalStateException("bad state")); ClusterState stateWithHealthNode = ClusterStateCreationUtils.state(node1, node1, node1, allNodes); - stateWithHealthNode = ClusterState.builder(stateWithHealthNode) - .nodeFeatures( - Map.of( - node1.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()), - node2.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()) - ) - ) - .build(); ClusterServiceUtils.setState(clusterService, stateWithHealthNode); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @Override @@ -143,16 +124,6 @@ public void testPublishDslErrorEntriesNoHealthNode() { errorStore.recordError("testIndex", new IllegalStateException("bad state")); ClusterState stateNoHealthNode = ClusterStateCreationUtils.state(node1, node1, null, 
allNodes); - stateNoHealthNode = ClusterState.builder(stateNoHealthNode) - .nodeFeatures( - Map.of( - node1.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()), - node2.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()) - ) - ) - .build(); ClusterServiceUtils.setState(clusterService, stateNoHealthNode); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @Override @@ -170,16 +141,6 @@ public void onFailure(Exception e) { public void testPublishDslErrorEntriesEmptyErrorStore() { // publishes the empty error store (this is the "back to healthy" state where all errors have been fixed) ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes); - state = ClusterState.builder(state) - .nodeFeatures( - Map.of( - node1.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()), - node2.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()) - ) - ) - .build(); ClusterServiceUtils.setState(clusterService, state); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @Override diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 9ea3bfefabdf8..884adb5458102 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -300,9 +300,6 @@ index without timestamp with pipeline: --- dynamic templates: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -450,9 +447,6 @@ dynamic templates: --- dynamic templates - conflicting aliases: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -549,9 +543,6 @@ dynamic templates - conflicting aliases: --- dynamic templates - conflicting aliases with top-level field: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [otel] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -632,9 +623,6 @@ dynamic templates - conflicting aliases with top-level field: --- dynamic templates with nesting: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" 
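# Editorial note: the `requires: cluster_features: [...]` stanzas being removed throughout this
# file guarded tests on node features (passthrough priority, boolean and multi-value routing
# paths, keyword dimension ignore_above) that appear to be assumed present on every node this
# branch supports, matching the feature-gate removals in the Java changes elsewhere in this patch.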
@@ -810,10 +798,6 @@ dynamic templates with nesting: --- dynamic templates with incremental indexing: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -1038,9 +1022,6 @@ dynamic templates with incremental indexing: --- subobject in passthrough object auto flatten: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation" @@ -1108,9 +1089,6 @@ enable subobjects in passthrough object: --- passthrough objects with duplicate priority: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: catch: /has a conflicting param/ indices.put_index_template: @@ -1135,9 +1113,6 @@ passthrough objects with duplicate priority: --- dimensions with ignore_malformed and ignore_above: - - requires: - cluster_features: ["mapper.keyword_dimension_ignore_above"] - reason: support for ignore_above on keyword dimensions - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -1229,9 +1204,6 @@ dimensions with ignore_malformed and ignore_above: --- non string dimension fields: - - requires: - cluster_features: ["mapper.pass_through_priority", "routing.boolean_routing_path", "mapper.boolean_dimension"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -1339,10 +1311,6 @@ non string dimension fields: --- multi value dimensions: - - requires: - cluster_features: ["routing.multi_value_routing_path"] - reason: support for multi-value dimensions - - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java index aa48c73cf1d73..08efe87e6fde5 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java @@ -160,11 +160,6 @@ public void writeTo(StreamOutput out) throws IOException { if (provider instanceof Maxmind maxmind) { out.writeString(maxmind.accountId); } else { - /* - * The existence of a non-Maxmind providers is gated on the feature 
get_database_configuration_action.multi_node, and - * get_database_configuration_action.multi_node is only available on or after - * TransportVersions.INGEST_GEO_DATABASE_PROVIDERS. - */ assert false : "non-maxmind DatabaseConfiguration.Provider [" + provider.getWriteableName() + "]"; } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java index c83c40e56b749..a1faaf1bb0196 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.geoip.DatabaseNodeService; import org.elasticsearch.ingest.geoip.GeoIpTaskState; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; @@ -41,8 +40,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.elasticsearch.ingest.IngestGeoIpFeatures.GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE; - public class TransportGetDatabaseConfigurationAction extends TransportNodesAction< GetDatabaseConfigurationAction.Request, GetDatabaseConfigurationAction.Response, @@ -50,7 +47,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio GetDatabaseConfigurationAction.NodeResponse, List> { - private final FeatureService featureService; private final DatabaseNodeService databaseNodeService; @Inject @@ -59,7 +55,6 @@ public TransportGetDatabaseConfigurationAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - FeatureService featureService, DatabaseNodeService databaseNodeService ) { super( @@ -70,39 +65,9 @@ public TransportGetDatabaseConfigurationAction( GetDatabaseConfigurationAction.NodeRequest::new, threadPool.executor(ThreadPool.Names.MANAGEMENT) ); - this.featureService = featureService; this.databaseNodeService = databaseNodeService; } - @Override - protected void doExecute( - Task task, - GetDatabaseConfigurationAction.Request request, - ActionListener listener - ) { - if (featureService.clusterHasFeature(clusterService.state(), GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE) == false) { - /* - * TransportGetDatabaseConfigurationAction used to be a TransportMasterNodeAction, and not all nodes in the cluster have been - * updated. So we don't want to send node requests to the other nodes because they will blow up. Instead, we just return - * the information that we used to return from the master node (it doesn't make any difference that this might not be the master - * node, because we're only reading the cluster state). Because older nodes only know about the Maxmind provider type, we filter - * out all others here to avoid causing problems on those nodes. 
- */ - newResponseAsync( - task, - request, - createActionContext(task, request).stream() - .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind) - .toList(), - List.of(), - List.of(), - listener - ); - } else { - super.doExecute(task, request, listener); - } - } - protected List createActionContext(Task task, GetDatabaseConfigurationAction.Request request) { final Set ids; if (request.getDatabaseIds().length == 0) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java index dfb8fa78089d2..e68bb9d82e91b 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request; import org.elasticsearch.injection.guice.Inject; @@ -42,8 +41,6 @@ import java.util.Map; import java.util.Optional; -import static org.elasticsearch.ingest.IngestGeoIpFeatures.PUT_DATABASE_CONFIGURATION_ACTION_IPINFO; - public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class); @@ -61,7 +58,6 @@ public void taskSucceeded(UpdateDatabaseConfigurationTask task, Void unused) { } }; - private final FeatureService featureService; private final MasterServiceTaskQueue updateDatabaseConfigurationTaskQueue; @Inject @@ -70,8 +66,7 @@ public TransportPutDatabaseConfigurationAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - FeatureService featureService + IndexNameExpressionResolver indexNameExpressionResolver ) { super( PutDatabaseConfigurationAction.NAME, @@ -84,7 +79,6 @@ public TransportPutDatabaseConfigurationAction( AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE ); - this.featureService = featureService; this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue( "update-geoip-database-configuration-state-update", Priority.NORMAL, @@ -96,18 +90,6 @@ public TransportPutDatabaseConfigurationAction( protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { final String id = request.getDatabase().id(); - // if this is an ipinfo configuration, then make sure the whole cluster supports that feature - if (request.getDatabase().provider() instanceof DatabaseConfiguration.Ipinfo - && featureService.clusterHasFeature(clusterService.state(), PUT_DATABASE_CONFIGURATION_ACTION_IPINFO) == false) { - listener.onFailure( - new IllegalArgumentException( - "Unable to use ipinfo database configurations in mixed-clusters with nodes that do not support feature " - + PUT_DATABASE_CONFIGURATION_ACTION_IPINFO.id() - ) - ); - return; - } - updateDatabaseConfigurationTaskQueue.submitTask( Strings.format("update-geoip-database-configuration-[%s]", id), new UpdateDatabaseConfigurationTask(listener, 
request.getDatabase()), diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml index a1104505bc240..007c82db4c923 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml @@ -1,9 +1,3 @@ ---- -setup: - - requires: - cluster_features: ["geoip.downloader.database.configuration", "get_database_configuration_action.multi_node"] - reason: "geoip downloader database configuration APIs added in 8.15, and updated in 8.16 to return more results" - --- teardown: - do: diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml index fd73c715a5ac5..0947984769529 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml @@ -1,9 +1,3 @@ -setup: - - requires: - cluster_features: - - "put_database_configuration_action.ipinfo" - reason: "ipinfo support added in 8.16" - --- "Test ip_location processor with defaults": - do: diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml index e2e9a1fdb5e28..47f09392df60e 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml @@ -1,10 +1,3 @@ ---- -setup: - - requires: - cluster_features: - - "put_database_configuration_action.ipinfo" - reason: "ip location downloader database configuration APIs added in 8.16 to support more types" - --- teardown: - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml index 25088f51e2b59..1434450b65a6a 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml @@ -221,9 +221,6 @@ setup: - close_to: {hits.hits.2._score: {value: 186.34454, error: 0.01}} --- "Test hamming distance fails on float": - - requires: - cluster_features: ["script.hamming"] - reason: "support for hamming distance added in 8.15" - do: headers: Content-Type: application/json diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml index cdd65ca0eb296..05a10ffdbccdb 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml @@ -1,7 +1,5 @@ setup: - requires: - 
cluster_features: ["mapper.vectors.bit_vectors"] - reason: "support for bit vectors added in 8.15" test_runner_features: headers - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml index 373f048e7be78..a6c111be681f9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml @@ -1,7 +1,5 @@ setup: - requires: - cluster_features: ["script.hamming"] - reason: "support for hamming distance added in 8.15" test_runner_features: headers - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml index f82b844f01588..3a869640993f4 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: ["script.term_stats"] - reason: "support for term stats has been added in 8.16" - - do: indices.create: index: test-index diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml index de4d6530f4a92..3a9c71e3c2bab 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: ["script.term_stats"] - reason: "support for term stats has been added in 8.16" - - do: indices.create: index: test-index diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 968e93cf9fc55..175abe183106b 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -251,11 +251,6 @@ setup: --- "Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - do: cluster.stats: {} diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index e8c34a4b6a20b..d2370919297a3 100644 --- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -234,11 +234,6 @@ setup: --- "Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - do: cluster.stats: {} 
diff --git a/modules/repository-s3/src/main/plugin-metadata/entitlement-policy.yaml b/modules/repository-s3/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..4c42ec110a257 --- /dev/null +++ b/modules/repository-s3/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,4 @@ +ALL-UNNAMED: + - network: + actions: + - connect diff --git a/modules/repository-url/src/main/plugin-metadata/entitlement-policy.yaml b/modules/repository-url/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..f1dc1fc7755ef --- /dev/null +++ b/modules/repository-url/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,4 @@ +org.apache.httpcomponents.httpclient: + - network: + actions: + - connect # for URLHttpClient diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index cad839bed9555..5876945cf93b6 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -79,7 +79,7 @@ public void executeHandshake( if (doHandshake) { super.executeHandshake(node, channel, profile, listener); } else { - assert getVersion().equals(TransportVersion.current()); + assert version.equals(TransportVersion.current()); listener.onResponse(TransportVersions.MINIMUM_COMPATIBLE); } } diff --git a/muted-tests.yml b/muted-tests.yml index 9766d3ed35f18..d95095ac81df4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -193,9 +193,6 @@ tests: - class: org.elasticsearch.cluster.service.MasterServiceTests method: testThreadContext issue: https://github.com/elastic/elasticsearch/issues/118914 -- class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests - method: testInvalidToken - issue: https://github.com/elastic/elasticsearch/issues/119019 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT @@ -213,9 +210,6 @@ tests: - class: org.elasticsearch.smoketest.MlWithSecurityIT method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} issue: https://github.com/elastic/elasticsearch/issues/119548 -- class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests - method: testSkipNonRootOfNestedDocuments - issue: https://github.com/elastic/elasticsearch/issues/119553 - class: org.elasticsearch.xpack.ml.integration.ForecastIT method: testOverflowToDisk issue: https://github.com/elastic/elasticsearch/issues/117740 @@ -224,9 +218,6 @@ tests: - class: org.elasticsearch.search.profile.dfs.DfsProfilerIT method: testProfileDfs issue: https://github.com/elastic/elasticsearch/issues/119711 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testGetServicesWithCompletionTaskType - issue: https://github.com/elastic/elasticsearch/issues/119959 - class: org.elasticsearch.multi_cluster.MultiClusterYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/119983 - class: org.elasticsearch.xpack.test.rest.XPackRestIT @@ -235,25 +226,9 @@ tests: - class: org.elasticsearch.index.mapper.IntervalThrottlerTests 
method: testThrottling issue: https://github.com/elastic/elasticsearch/issues/120023 -- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT - method: testCheckThrows {pathPrefix=denied actionName=sslSessionImpl_getSessionContext} - issue: https://github.com/elastic/elasticsearch/issues/120053 -- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT - method: testCheckThrows {pathPrefix=denied_nonmodular actionName=sslSessionImpl_getSessionContext} - issue: https://github.com/elastic/elasticsearch/issues/120054 - class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT method: testUpdatePolicyToAddPhasesYieldsInvalidActionsToBeSkipped issue: https://github.com/elastic/elasticsearch/issues/118406 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsIT - issue: https://github.com/elastic/elasticsearch/issues/120088 -- class: org.elasticsearch.xpack.searchablesnapshots.minio.MinioSearchableSnapshotsIT - issue: https://github.com/elastic/elasticsearch/issues/120101 -- class: org.elasticsearch.repositories.s3.S3RepositoryThirdPartyTests - issue: https://github.com/elastic/elasticsearch/issues/120115 -- class: org.elasticsearch.repositories.s3.RepositoryS3MinioBasicCredentialsRestIT - issue: https://github.com/elastic/elasticsearch/issues/120117 -- class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT - issue: https://github.com/elastic/elasticsearch/issues/118548 - class: org.elasticsearch.xpack.security.QueryableReservedRolesIT method: testConfiguredReservedRolesAfterClosingAndOpeningIndex issue: https://github.com/elastic/elasticsearch/issues/120127 diff --git a/plugins/discovery-ec2/src/main/plugin-metadata/entitlement-policy.yaml b/plugins/discovery-ec2/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..4c42ec110a257 --- /dev/null +++ b/plugins/discovery-ec2/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,4 @@ +ALL-UNNAMED: + - network: + actions: + - connect diff --git a/plugins/mapper-annotated-text/src/main/java/module-info.java b/plugins/mapper-annotated-text/src/main/java/module-info.java index 13f2bd66418be..58aca0d2857fe 100644 --- a/plugins/mapper-annotated-text/src/main/java/module-info.java +++ b/plugins/mapper-annotated-text/src/main/java/module-info.java @@ -15,6 +15,4 @@ requires org.apache.lucene.highlighter; // exports nothing - - provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.index.mapper.annotatedtext.Features; } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 33b5db1c4662d..4b2006430b89e 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -22,7 +22,6 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -64,8 +63,6 @@ **/ public 
class AnnotatedTextFieldMapper extends FieldMapper { - public static final NodeFeature SYNTHETIC_SOURCE_SUPPORT = new NodeFeature("mapper.annotated_text.synthetic_source", true); - public static final String CONTENT_TYPE = "annotated_text"; private static Builder builder(FieldMapper in) { diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java deleted file mode 100644 index 51a2d2bbe1d40..0000000000000 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper.annotatedtext; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -/** - * Provides features for annotated text mapper. - */ -public class Features implements FeatureSpecification { - @Override - public Set getFeatures() { - return Set.of( - AnnotatedTextFieldMapper.SYNTHETIC_SOURCE_SUPPORT // Added in 8.15 - ); - } -} diff --git a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification deleted file mode 100644 index 1fc11da18fc3c..0000000000000 --- a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ /dev/null @@ -1,10 +0,0 @@ -# - # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - # or more contributor license agreements. Licensed under the "Elastic License - # 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - # Public License v 1"; you may not use this file except in compliance with, at - # your election, the "Elastic License 2.0", the "GNU Affero General Public - # License v3.0 only", or the "Server Side Public License, v 1". 
-# - -org.elasticsearch.index.mapper.annotatedtext.Features diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index 5bbade8cf6fce..1e9788c69dba9 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -45,11 +45,11 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster localCluster useCluster remoteCluster systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(localCluster.name).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.rest.remote_cluster', getClusterInfo(remoteCluster.name).map { it.allHttpSocketURI.join(",") }) - doFirst { - nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) - } + def fipsDisabled = buildParams.inFipsJvm == false + onlyIf("FIPS mode disabled") { fipsDisabled } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { @@ -60,28 +60,28 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> cluster.nodes.forEach { node -> node.getAllTransportPortURI() } - cluster.nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(cluster) } } tasks.register("${baseName}#oneThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oldClusterTest" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#twoThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oneThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#fullUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#twoThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java index 13c647983fad5..392f2037139a3 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java @@ -13,8 +13,8 @@ import org.apache.http.entity.InputStreamEntity; import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.XContentTestUtils; @@ -42,7 +42,9 @@ import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static 
org.hamcrest.Matchers.notNullValue; public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase { @@ -156,8 +158,16 @@ protected static Version indexVersion(String indexName) throws Exception { return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100)); } + protected static int getNumberOfReplicas(String indexName) throws Exception { + var indexSettings = (Map) ((Map) getIndexSettings(indexName).get(indexName)).get("settings"); + var numberOfReplicas = Integer.parseInt((String) indexSettings.get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS)); + assertThat(numberOfReplicas, allOf(greaterThanOrEqualTo(0), lessThanOrEqualTo(NODES - 1))); + return numberOfReplicas; + } + protected static void indexDocs(String indexName, int numDocs) throws Exception { var request = new Request("POST", "/_bulk"); + request.addParameter("refresh", "true"); var docs = new StringBuilder(); IntStream.range(0, numDocs).forEach(n -> docs.append(Strings.format(""" {"index":{"_index":"%s"}} @@ -185,19 +195,30 @@ protected static void mountIndex(String repository, String snapshot, String inde } protected static void restoreIndex(String repository, String snapshot, String indexName, String renamedIndexName) throws Exception { + restoreIndex(repository, snapshot, indexName, renamedIndexName, Settings.EMPTY); + } + + protected static void restoreIndex( + String repository, + String snapshot, + String indexName, + String renamedIndexName, + Settings indexSettings + ) throws Exception { var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); request.addParameter("wait_for_completion", "true"); - request.setJsonEntity(org.elasticsearch.common.Strings.format(""" + request.setJsonEntity(Strings.format(""" { "indices": "%s", "include_global_state": false, "rename_pattern": "(.+)", "rename_replacement": "%s", - "include_aliases": false - }""", indexName, renamedIndexName)); + "include_aliases": false, + "index_settings": %s + }""", indexName, renamedIndexName, Strings.toString(indexSettings))); var responseBody = createFromResponse(client().performRequest(request)); - assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); - assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.successful"))); + assertThat(responseBody.evaluate("snapshot.shards.failed"), equalTo(0)); } protected static void updateRandomIndexSettings(String indexName) throws IOException { @@ -215,20 +236,19 @@ protected static void updateRandomIndexSettings(String indexName) throws IOExcep updateIndexSettings(indexName, settings); } - protected static void updateRandomMappings(String indexName) throws IOException { + protected static void updateRandomMappings(String indexName) throws Exception { final var runtime = new HashMap<>(); runtime.put("field_" + randomInt(2), Map.of("type", "keyword")); final var properties = new HashMap<>(); properties.put(randomIdentifier(), Map.of("type", "long")); - var body = XContentTestUtils.convertToXContent(Map.of("runtime", runtime, "properties", properties), XContentType.JSON); + updateMappings(indexName, Map.of("runtime", runtime, "properties", properties)); + } + + protected static void updateMappings(String indexName, Map mappings) throws Exception { + var body = XContentTestUtils.convertToXContent(mappings, 
XContentType.JSON); var request = new Request("PUT", indexName + "/_mappings"); request.setEntity( - new InputStreamEntity( - body.streamInput(), - body.length(), - - ContentType.create(XContentType.JSON.mediaTypeWithoutParameters()) - ) + new InputStreamEntity(body.streamInput(), body.length(), ContentType.create(XContentType.JSON.mediaTypeWithoutParameters())) ); assertOK(client().performRequest(request)); } @@ -238,4 +258,14 @@ protected static boolean isIndexClosed(String indexName) throws Exception { var state = responseBody.evaluate("metadata.indices." + indexName + ".state"); return IndexMetadata.State.fromString((String) state) == IndexMetadata.State.CLOSE; } + + protected static void addIndexWriteBlock(String indexName) throws Exception { + assertAcknowledged(client().performRequest(new Request("PUT", Strings.format("/%s/_block/write", indexName)))); + } + + protected static void forceMerge(String indexName, int maxNumSegments) throws Exception { + var request = new Request("POST", '/' + indexName + "/_forcemerge"); + request.addParameter("max_num_segments", String.valueOf(maxNumSegments)); + assertOK(client().performRequest(request)); + } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java index 15d41cc981cea..89fefde08b9a4 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java @@ -9,18 +9,13 @@ package org.elasticsearch.lucene; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.cluster.util.Version; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.Matchers.allOf; +import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING; import static org.hamcrest.Matchers.equalTo; public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRestartIndexCompatibilityTestCase { @@ -34,7 +29,90 @@ public FullClusterRestartLuceneIndexCompatibilityIT(Version version) { } /** - * Creates an index and a snapshot on N-2, then restores the snapshot on N. + * Creates an index on N-2, upgrades to N -1 and marks as read-only, then upgrades to N. 
+ */ + public void testIndexUpgrade() throws Exception { + final String index = suffix("index"); + final int numDocs = 2431; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + return; + } + + if (isFullyUpgradedTo(VERSION_CURRENT)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + var indexSettings = getIndexSettingsAsMap(index); + assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString())); + assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString())); + + var numberOfReplicas = getNumberOfReplicas(index); + if (0 < numberOfReplicas) { + logger.debug("--> resetting number of replicas [{}] to [0]", numberOfReplicas); + updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)); + } + + updateRandomIndexSettings(index); + updateRandomMappings(index); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(index); + + logger.debug("--> closing index [{}]", index); + closeIndex(index); + ensureGreen(index); + + logger.debug("--> adding replica to test peer-recovery for closed shards"); + updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)); + ensureGreen(index); + + logger.debug("--> re-opening index [{}]", index); + openIndex(index); + ensureGreen(index); + + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + } + } + + /** + * Creates an index on N-2, marks it as read-only on N-1 and creates a snapshot, then restores the snapshot on N.
*/ public void testRestoreIndex() throws Exception { final String repository = suffix("repository"); @@ -59,9 +137,6 @@ public void testRestoreIndex() throws Exception { logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); indexDocs(index, numDocs); - - logger.debug("--> creating snapshot [{}]", snapshot); - createSnapshot(client(), repository, snapshot, true); return; } @@ -71,6 +146,18 @@ public void testRestoreIndex() throws Exception { assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); assertDocCount(client(), index, numDocs); + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + logger.debug("--> deleting index [{}]", index); deleteIndex(index); return; @@ -79,32 +166,109 @@ public void testRestoreIndex() throws Exception { if (isFullyUpgradedTo(VERSION_CURRENT)) { var restoredIndex = suffix("index-restored"); logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); + restoreIndex(repository, snapshot, index, restoredIndex); + ensureGreen(restoredIndex); + + assertThat(indexVersion(restoredIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), restoredIndex, numDocs); + + updateRandomIndexSettings(restoredIndex); + updateRandomMappings(restoredIndex); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(restoredIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(restoredIndex); + + logger.debug("--> closing restored index [{}]", restoredIndex); + closeIndex(restoredIndex); + ensureGreen(restoredIndex); + + logger.debug("--> adding replica to test peer-recovery for closed shards"); + updateIndexSettings(restoredIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)); + ensureGreen(restoredIndex); + + logger.debug("--> re-opening restored index [{}]", restoredIndex); + openIndex(restoredIndex); + ensureGreen(restoredIndex); + + assertDocCount(client(), restoredIndex, numDocs); - // Restoring the index will fail as Elasticsearch does not support reading N-2 yet - var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); - request.addParameter("wait_for_completion", "true"); - request.setJsonEntity(Strings.format(""" - { - "indices": "%s", - "include_global_state": false, - "rename_pattern": "(.+)", - "rename_replacement": "%s", - "include_aliases": false - }""", index, restoredIndex)); - - var responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); - assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); - assertThat( - responseException.getMessage(), - allOf( - containsString("cannot restore index [[" + index), - containsString("because it cannot be upgraded"), - containsString("has current compatibility version [" + VERSION_MINUS_2 + '-' + VERSION_MINUS_1.getMajor() + ".0.0]"), - containsString("but the minimum compatible version is [" + VERSION_MINUS_1.getMajor() + ".0.0]."), - containsString("It should be re-indexed in Elasticsearch " + VERSION_MINUS_1.getMajor() + ".x"), - containsString("before upgrading to " + 
VERSION_CURRENT) - ) + logger.debug("--> deleting restored index [{}]", restoredIndex); + deleteIndex(restoredIndex); + } + } + + /** + * Creates an index on N-2, marks it as read-only on N-1, creates a snapshot, closes the index, then restores the snapshot over the closed index on N. + */ + public void testRestoreIndexOverClosedIndex() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 2134; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> force-merge index [{}] to 1 segment", index); + forceMerge(index, 1); + + logger.debug("--> closing index [{}]", index); + closeIndex(index); + ensureGreen(index); + return; + } + + if (isFullyUpgradedTo(VERSION_CURRENT)) { + var indexSettings = getIndexSettingsAsMap(index); + assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString())); + assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString())); + assertThat(isIndexClosed(index), equalTo(true)); + + logger.debug("--> restoring index [{}] over existing closed index", index); + restoreIndex(repository, snapshot, index, index); + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); } } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java index a7dc5e41fd327..477f2099477cc 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java @@ -155,9 +155,11 @@ public void testSearchableSnapshotUpgrade() throws Exception { assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); assertDocCount(client(), mountedIndex, numDocs); - logger.debug("--> adding replica to test replica upgrade"); - updateIndexSettings(mountedIndex,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); - ensureGreen(mountedIndex); + if (randomBoolean()) { + logger.debug("--> adding replica to test upgrade with replica"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(mountedIndex); + } if (randomBoolean()) { logger.debug("--> random closing of index [{}] before upgrade", mountedIndex); diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java index 03b6a9292e355..85fc4abc5e066 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java @@ -73,6 +73,12 @@ protected void maybeUpgrade() throws Exception { closeClients(); cluster().upgradeNodeToVersion(i, expectedNodeVersion); initClient(); + + ensureHealth((request -> { + request.addParameter("timeout", "70s"); + request.addParameter("wait_for_nodes", String.valueOf(NODES)); + request.addParameter("wait_for_status", "yellow"); + })); } currentNodeVersion = nodesVersions().get(nodeName); diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java new file mode 100644 index 0000000000000..c183ccc39cdea --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import java.util.List; + +import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING; +import static org.hamcrest.Matchers.equalTo; + +public class RollingUpgradeLuceneIndexCompatibilityTestCase extends RollingUpgradeIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial"); + } + + public RollingUpgradeLuceneIndexCompatibilityTestCase(List nodesVersions) { + super(nodesVersions); + } + + /** + * Creates an index on N-2, upgrades to N-1 and marks it as read-only, then verifies it remains searchable during rolling upgrades.
+ */ + public void testIndexUpgrade() throws Exception { + final String index = suffix("index-rolling-upgraded"); + final int numDocs = 2543; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + ensureGreen(index); + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + return; + } + + if (nodesVersions().values().stream().anyMatch(v -> v.onOrAfter(VERSION_CURRENT))) { + var indexSettings = getIndexSettingsAsMap(index); + assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString())); + assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString())); + + if (isIndexClosed(index)) { + logger.debug("--> re-opening index [{}] after upgrade", index); + openIndex(index); + ensureGreen(index); + } + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + updateRandomIndexSettings(index); + updateRandomMappings(index); + + if (randomBoolean()) { + logger.debug("--> random closing of index [{}] before upgrade", index); + closeIndex(index); + ensureGreen(index); + } + } + } + + /** + * Creates an index on N-2, marks as read-only on N-1 and creates a snapshot, then restores the snapshot during rolling upgrades to N. 
+ */ + public void testRestoreIndex() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 1234; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + if (nodesVersions().values().stream().anyMatch(v -> v.onOrAfter(VERSION_CURRENT))) { + var restoredIndex = suffix("index-restored-rolling"); + boolean success = false; + try { + + logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); + restoreIndex(repository, snapshot, index, restoredIndex); + ensureGreen(restoredIndex); + + assertThat(indexVersion(restoredIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), restoredIndex, numDocs); + + updateRandomIndexSettings(restoredIndex); + updateRandomMappings(restoredIndex); + + logger.debug("--> closing restored index [{}]", restoredIndex); + closeIndex(restoredIndex); + ensureGreen(restoredIndex); + + logger.debug("--> re-opening restored index [{}]", restoredIndex); + openIndex(restoredIndex); + ensureGreen(restoredIndex); + + assertDocCount(client(), restoredIndex, numDocs); + + logger.debug("--> deleting restored index [{}]", restoredIndex); + deleteIndex(restoredIndex); + + success = true; + } finally { + if (success == false) { + try { + client().performRequest(new Request("DELETE", "/" + restoredIndex)); + } catch (ResponseException e) { + logger.warn("Failed to delete restored index [" + restoredIndex + ']', e); + } + } + } + } + } +} diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 8dd5031c07822..0889837457285 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -82,7 +82,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { versions = [bwcVersion.toString(), project.version] numberOfNodes = 4 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + setting 'path.repo', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' setting "xpack.license.self_generated.type", "trial" /* There is a chance we have more master changes than "normal", so to avoid this test from failing, @@ -96,50 +96,32 @@ 
buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { useCluster baseCluster mustRunAfter("precommit") - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def baseInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(baseName) - it.parameters.service = serviceProvider - }.map { it.getAllHttpSocketURI() } - - def baseInfoAfterOneNodeUpdate = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(baseName) - it.parameters.service = serviceProvider - }.map { it.getAllHttpSocketURI() } - - def baseInfoAfterTwoNodesUpdate = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(baseName) - it.parameters.service = serviceProvider - }.map { it.getAllHttpSocketURI() } - def nonInputProps = nonInputProperties - def sharedRepoFolder = new File(buildDir, "cluster/shared/repo/${baseName}") + def baseInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def baseInfoAfterOneNodeUpdate = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def baseInfoAfterTwoNodesUpdate = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def sharedRepoFolder = layout.buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile doFirst { delete(sharedRepoFolder) // Getting the endpoints causes a wait for the cluster println "Test cluster endpoints are: ${-> baseInfo.get().join(",")}" println "Upgrading one node to create a mixed cluster" - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) + // Getting the endpoints causes a wait for the cluster - println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get().join(",")}" + println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get()}" println "Upgrading another node to create a mixed cluster" - baseCluster.get().nextNodeToNextVersion() - nonInputProps.systemProperty('tests.rest.cluster', baseInfoAfterTwoNodesUpdate.map(c -> c.join(","))) - nonInputProps.systemProperty('tests.clustername', baseName) - if (excludeList.isEmpty() == false) { - systemProperty 'tests.rest.blacklist', excludeList.join(',') - } + getRegistry().get().nextNodeToNextVersion(baseCluster) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') } - systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + nonInputProperties.systemProperty('tests.rest.cluster', baseInfoAfterTwoNodesUpdate) + nonInputProperties.systemProperty('tests.clustername', baseName) + systemProperty 'tests.path.repo', "${layout.buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile}" systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') -// onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + def bwcEnabled = project.bwc_tests_enabled + onlyIf("BWC tests disabled") { bwcEnabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/qa/rolling-upgrade-legacy/build.gradle b/qa/rolling-upgrade-legacy/build.gradle index e1c31fd50c0d4..839daaf1a949d 100644 --- 
a/qa/rolling-upgrade-legacy/build.gradle +++ b/qa/rolling-upgrade-legacy/build.gradle @@ -40,7 +40,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> numberOfNodes = 3 setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + setting 'path.repo', "${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } @@ -52,12 +52,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster baseCluster mustRunAfter("precommit") doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") + delete("${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}") } def excludeList = [] systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) if (excludeList.isEmpty() == false) { systemProperty 'tests.rest.blacklist', excludeList.join(',') @@ -68,12 +68,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'true' - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) def excludeList = [] if (excludeList.isEmpty() == false) { @@ -85,12 +85,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'false' - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) def excludeList = [] if (excludeList.isEmpty() == false) { @@ -101,12 +101,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { dependsOn "${baseName}#twoThirdsUpgradedTest" doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } useCluster testClusters.named(baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', 
getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) def excludeList = [] if (excludeList.isEmpty() == false) { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index d9adec47ff483..30367bf55d8cc 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.DesiredNode; @@ -84,7 +83,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve randomDoubleProcessorCount(), ByteSizeValue.ofGb(randomIntBetween(10, 24)), ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + null ) ) .toList(); @@ -96,7 +95,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + null ); }).toList(); } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index d4843fb152888..4a5ceeb66f661 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -222,10 +222,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.validation.templates"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: headers: Content-Type: application/json @@ -313,10 +309,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.component.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -494,10 +486,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.component.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -617,10 +605,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.component.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -816,10 +800,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.index.template.substitutions"] - reason: "ingest simulate index template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -1010,10 +990,6 @@ setup: - headers - 
allowed_warnings - - requires: - cluster_features: ["simulate.index.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -1227,10 +1203,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.17" - - do: headers: Content-Type: application/json @@ -1463,10 +1435,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.17" - - do: indices.put_template: name: my-legacy-template @@ -1584,10 +1552,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.support.non.template.mapping"] - reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17" - # A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists # because this test is making sure we get correct behavior when an index matches *no* template: - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json index 96d477160b277..aa5a3dc0a791f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json @@ -28,6 +28,11 @@ ] }, "params": { + "hard": { + "type": "boolean", + "default": false, + "description": "If true, the connector doc is deleted. If false, connector doc is marked as deleted (soft-deleted)." + }, "delete_sync_jobs": { "type": "boolean", "default": false, diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml index 9da6d2c5f086e..ce3f7f0198399 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml @@ -1,9 +1,5 @@ --- setup: - - requires: - cluster_features: "mapper.query_index_mode" - reason: "require index_mode" - - do: indices.create: index: test_metrics diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index 13f6ca58ea295..a0061272a2c23 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -1014,10 +1014,6 @@ flattened field: --- flattened field with ignore_above: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -1070,10 +1066,6 @@ flattened field with ignore_above: --- flattened field with ignore_above and arrays: - - requires: - cluster_features: ["mapper.flattened.ignore_above_with_arrays_support"] - reason: requires support of ignore_above synthetic source with arrays - - do: indices.create: index: test @@ -1127,10 +1119,6 @@ flattened field with ignore_above and arrays: --- completion: - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: 
indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml index 414c24cfffd7d..7b8f785a2cb93 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -2,7 +2,6 @@ "Metrics object indexing": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: @@ -69,7 +68,6 @@ "Root with metrics": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: @@ -131,7 +129,6 @@ "Metrics object indexing with synthetic source": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: @@ -201,7 +198,6 @@ "Root without subobjects with synthetic source": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 5003f6df79a14..096ccbce9a58b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -5,10 +5,6 @@ setup: --- object with unmapped fields: - - requires: - cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -56,10 +52,6 @@ object with unmapped fields: --- unmapped arrays: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -102,10 +94,6 @@ unmapped arrays: --- nested object with unmapped fields: - - requires: - cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -153,10 +141,6 @@ nested object with unmapped fields: --- empty object with unmapped fields: - - requires: - cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -314,10 +298,6 @@ disabled object contains array: --- disabled subobject: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -353,10 +333,6 @@ disabled subobject: --- disabled subobject with array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -393,10 +369,6 @@ disabled subobject with array: --- mixed disabled and enabled objects: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -441,7 +413,7 @@ mixed disabled and 
enabled objects: --- object with dynamic override: - requires: - cluster_features: ["mapper.ignored_source.dont_expand_dots", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.ignored_source.dont_expand_dots"] reason: requires tracking ignored source - do: @@ -488,10 +460,6 @@ object with dynamic override: --- subobject with dynamic override: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -537,10 +505,6 @@ subobject with dynamic override: --- object array in object with dynamic override: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -592,10 +556,6 @@ object array in object with dynamic override: --- value array in object with dynamic override: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -636,10 +596,6 @@ value array in object with dynamic override: --- nested object: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -682,10 +638,6 @@ nested object: --- nested object next to regular: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -729,10 +681,6 @@ nested object next to regular: --- nested object with disabled: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -818,10 +766,6 @@ nested object with disabled: --- doubly nested object: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -914,10 +858,6 @@ doubly nested object: --- subobjects auto: - - requires: - cluster_features: ["mapper.subobjects_auto", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source and supporting subobjects auto setting - - do: indices.create: index: test @@ -1003,10 +943,6 @@ subobjects auto: --- synthetic_source with copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1140,10 +1076,6 @@ synthetic_source with copy_to: --- synthetic_source with disabled doc_values: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires disabled doc_values support in synthetic source - - do: indices.create: index: test @@ -1224,10 +1156,6 @@ synthetic_source with disabled doc_values: --- fallback synthetic_source for text field: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires disabled doc_values support in synthetic source - - do: indices.create: index: test @@ -1259,10 +1187,6 @@ fallback synthetic_source for text field: --- synthetic_source with copy_to and ignored values: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1328,10 +1252,6 @@ synthetic_source with copy_to and ignored values: --- synthetic_source with copy_to field having values in source: - - requires: - cluster_features: 
["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1392,10 +1312,6 @@ synthetic_source with copy_to field having values in source: --- synthetic_source with ignored source field using copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1457,10 +1373,6 @@ synthetic_source with ignored source field using copy_to: --- synthetic_source with copy_to field from dynamic template having values in source: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1555,7 +1467,6 @@ synthetic_source with copy_to field from dynamic template having values in sourc --- synthetic_source with copy_to and invalid values for copy: - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] reason: requires copy_to support in synthetic source test_runner_features: "contains" @@ -1592,10 +1503,6 @@ synthetic_source with copy_to and invalid values for copy: --- synthetic_source with copy_to pointing inside object: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1697,10 +1604,6 @@ synthetic_source with copy_to pointing inside object: --- synthetic_source with copy_to pointing to ambiguous field: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1745,10 +1648,6 @@ synthetic_source with copy_to pointing to ambiguous field: --- synthetic_source with copy_to pointing to ambiguous field and subobjects false: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1794,10 +1693,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false: --- synthetic_source with copy_to pointing to ambiguous field and subobjects auto: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1845,7 +1740,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto: synthetic_source with copy_to pointing at dynamic field: - requires: test_runner_features: contains - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] reason: requires copy_to support in synthetic source - do: @@ -1931,10 +1825,6 @@ synthetic_source with copy_to pointing at dynamic field: --- synthetic_source with copy_to pointing inside dynamic object: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index 095665e9337b1..e51d527593d45 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -5,10 +5,6 @@ setup: --- object param - store complex object: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -70,10 +66,6 @@ object param - store complex object: --- object param - object array: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -135,10 +127,6 @@ object param - object array: --- object param - object array within array: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -179,10 +167,6 @@ object param - object array within array: --- object param - no object array: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -222,10 +206,6 @@ object param - no object array: --- object param - field ordering in object array: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -272,10 +252,6 @@ object param - field ordering in object array: --- object param - nested object array next to other fields: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -330,7 +306,7 @@ object param - nested object array next to other fields: --- object param - nested object with stored array: - requires: - cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested"] reason: requires fix to object array handling - do: @@ -379,10 +355,6 @@ object param - nested object with stored array: --- index param - nested array within array: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -427,10 +399,6 @@ index param - nested array within array: --- # 112156 stored field under object with store_array_source: - - requires: - cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix", "mapper.bwc_workaround_9_0"] - reason: requires bug fix to be implemented - - do: indices.create: index: test @@ -477,10 +445,6 @@ stored field under object with store_array_source: --- field param - keep root array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -535,10 +499,6 @@ field param - keep root array: --- field param - keep nested array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -605,7 +565,6 @@ field param - keep nested array: field param - keep root singleton fields: - requires: test_runner_features: close_to - cluster_features: ["mapper.synthetic_source_keep"] reason: requires 
keeping singleton source - do: @@ -695,7 +654,6 @@ field param - keep root singleton fields: field param - keep nested singleton fields: - requires: test_runner_features: close_to - cluster_features: ["mapper.synthetic_source_keep"] reason: requires keeping singleton source - do: @@ -776,10 +734,6 @@ field param - keep nested singleton fields: --- field param - nested array within array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -820,10 +774,6 @@ field param - nested array within array: --- index param - root arrays: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires keeping array source - - do: indices.create: index: test @@ -900,10 +850,6 @@ index param - root arrays: --- index param - dynamic root arrays: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -952,10 +898,6 @@ index param - dynamic root arrays: --- index param - object array within array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1001,10 +943,6 @@ index param - object array within array: --- index param - no object array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1045,10 +983,6 @@ index param - no object array: --- index param - field ordering: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1095,10 +1029,6 @@ index param - field ordering: --- index param - nested arrays: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1162,10 +1092,6 @@ index param - nested arrays: --- index param - nested object with stored array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1213,10 +1139,6 @@ index param - nested object with stored array: --- index param - flattened fields: - - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires keeping array source - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 3d82539944a97..89816be5ca8e7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -453,8 +453,6 @@ --- "Composable index templates that include subobjects: auto at root": - requires: - cluster_features: ["mapper.subobjects_auto"] - reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" test_runner_features: "allowed_warnings" - do: @@ -504,8 +502,6 @@ --- "Composable index templates that include subobjects: auto on arbitrary field": - requires: - cluster_features: ["mapper.subobjects_auto"] - reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" test_runner_features: 
"allowed_warnings" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml index c88d638199dba..d07d03cb7146c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml @@ -1,8 +1,5 @@ --- sort doc with nested object: - - requires: - cluster_features: ["mapper.index_sorting_on_nested"] - reason: uses index sorting on nested fields - do: indices.create: index: test @@ -66,9 +63,6 @@ sort doc with nested object: --- sort doc on nested field: - - requires: - cluster_features: [ "mapper.index_sorting_on_nested" ] - reason: uses index sorting on nested fields - do: catch: /cannot apply index sort to field \[nested_field\.foo\] under nested object \[nested_field\]/ indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 07af3fb52b92f..2a31b3bd387c4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -312,7 +312,6 @@ override sort mode settings: --- override sort field using nested field type in sorting: - requires: - cluster_features: ["mapper.index_sorting_on_nested"] test_runner_features: [ capabilities ] capabilities: - method: PUT @@ -358,9 +357,6 @@ override sort field using nested field type in sorting: --- override sort field using nested field type: - - requires: - cluster_features: ["mapper.index_sorting_on_nested"] - reason: "Support for index sorting on indexes with nested objects required" - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index 084f104932d99..8485aba0ecc6a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -55,9 +55,6 @@ keyword: --- keyword with normalizer: - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: indices.create: index: test-keyword-with-normalizer diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 9d6e8da8c1e1e..2a14c291d5d31 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -417,7 +417,6 @@ - requires: test_runner_features: [arbitrary_key] - cluster_features: ["mapper.query_index_mode"] reason: "_ignored_source added to mappings" - do: @@ -511,10 +510,6 @@ --- "Lucene segment level fields stats": - - requires: - cluster_features: ["mapper.segment_level_fields_stats"] - reason: "segment level fields stats" - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml index 3ec854e93d82c..20e9d92a36088 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml @@ -1,8 +1,6 @@ --- "Allocation stats": - requires: - cluster_features: ["stats.include_disk_thresholds"] - reason: "fs watermark stats was added in 8.15.0" test_runner_features: [arbitrary_key] - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml index 3432a1e34c018..6ca17cc9cdce9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml @@ -520,10 +520,6 @@ setup: --- "Null bounds": - - requires: - cluster_features: ["mapper.range.null_values_off_by_one_fix"] - reason: fixed in 8.15.0 - - do: index: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml index bd14fb182ac5a..94db54d152941 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: 'unified_highlighter_matched_fields' - reason: 'test requires unified highlighter to support matched_fields' - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml index a3d920d903ae8..bc4e262ea53c6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml @@ -1,8 +1,6 @@ setup: - requires: - cluster_features: "mapper.vectors.bit_vectors" test_runner_features: close_to - reason: 'bit vectors added in 8.15' - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml index 3d4841a16d82d..cffc12a8d24ae 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml @@ -59,9 +59,6 @@ setup: --- "Simple knn query": - - requires: - cluster_features: "search.vectors.k_param_supported" - reason: 'k param for knn as query is required' - do: search: index: my_index diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml index f6538b573809a..c92c88df91641 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml @@ -1,8 +1,6 @@ # test how knn query interact with other queries setup: - requires: - cluster_features: "search.vectors.k_param_supported" - reason: 'k param for knn as query is required' test_runner_features: close_to - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml index 3f81c0044d170..abde3e86dd05b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bbq" - reason: 'kNN float to better-binary quantization is required' - do: indices.create: index: bbq_hnsw diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml index baf568762dd17..9b27aea4b1db7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.int4_quantization" - reason: 'kNN float to half-byte quantization is required' - do: indices.create: index: hnsw_byte_quantized diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml index 0bc111576c2a9..2541de7023bf0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bbq" - reason: 'kNN float to better-binary quantization is required' - do: indices.create: index: bbq_flat diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml index 0e0180e58fd96..f9f8d56e1d9c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.int4_quantization" - reason: 'kNN float to half-byte quantization is required' - do: indices.create: index: int4_flat diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml index 680433a5945fd..ef2ae3ba7ee0a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bit_vectors" - reason: 'mapper.vectors.bit_vectors' - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml index 783f08a5d4ff4..07261e6a30c77 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bit_vectors" - reason: 'mapper.vectors.bit_vectors' - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index 44d966b76f34e..8915325c3a67b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -1128,10 +1128,6 @@ fetch geo_point: --- "Test with subobjects: auto": - - requires: - cluster_features: "mapper.subobjects_auto" - reason: requires support for subobjects auto setting - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml index 1730a49f743d9..7e00cbb01c589 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml @@ -1,8 +1,5 @@ --- ignore_above mapping level setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -42,9 +39,6 @@ ignore_above mapping level setting: --- ignore_above mapping level setting on arrays: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -84,9 +78,6 @@ ignore_above mapping level setting on arrays: --- ignore_above mapping overrides setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -128,9 +119,6 @@ ignore_above mapping overrides setting: --- ignore_above mapping overrides setting on arrays: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -172,9 +160,6 @@ ignore_above mapping overrides setting on arrays: --- date ignore_above index level setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml index 772c3c24170cd..045f757b08302 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml @@ -5,9 +5,6 @@ setup: --- ignore_above mapping level setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -47,9 +44,6 @@ ignore_above mapping level setting: --- ignore_above mapping level setting on arrays: - - requires: - cluster_features: [ "mapper.flattened.ignore_above_with_arrays_support" ] - reason: requires support of ignore_above with arrays for flattened fields - do: indices.create: index: test @@ -90,9 +84,6 @@ ignore_above mapping level setting on arrays: --- ignore_above mapping overrides setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -135,9 +126,6 @@ ignore_above mapping overrides setting: --- ignore_above mapping overrides setting on arrays: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml index 3c29845871fe7..6e711ee143b06 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml @@ -16,9 +16,6 @@ ignore_above index setting negative value: --- keyword ignore_above mapping setting negative value: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: catch: bad_request indices.create: @@ -32,9 +29,6 @@ keyword ignore_above mapping setting negative value: --- flattened ignore_above mapping setting negative value: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: catch: bad_request indices.create: @@ -48,9 +42,6 @@ flattened ignore_above mapping setting negative value: --- wildcard ignore_above mapping setting negative value: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: catch: bad_request indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml index a4a9b1aaecb22..71e0c2d147c1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml @@ -1,8 +1,5 @@ --- flattened ignore_above single-value field: - - requires: - cluster_features: [ "flattened.ignore_above_support" ] - reason: introduce ignore_above support in flattened fields - do: indices.create: index: test @@ -65,9 +62,6 @@ flattened ignore_above 
single-value field: --- flattened ignore_above multi-value field: - - requires: - cluster_features: [ "flattened.ignore_above_support" ] - reason: introduce ignore_above support in flattened fields - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml index da0f00d960534..70a3b0253c78f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -119,10 +119,6 @@ setup: - skip: features: headers - - requires: - cluster_features: ["simulate.mapping.validation"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: headers: Content-Type: application/json @@ -265,10 +261,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.validation.templates"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: indices.put_template: name: v1_template @@ -401,10 +393,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.validation.templates"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: allowed_warnings: - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml index 616afd3cf67ad..1e841c8893fc6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml @@ -122,8 +122,6 @@ missing dimension on routing path field: multi-value routing path field succeeds: - requires: test_runner_features: close_to - cluster_features: ["routing.multi_value_routing_path"] - reason: support for multi-value dimensions - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml index beba6f2752a11..5a5ae03ab938f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml @@ -65,9 +65,6 @@ setup: --- generates a consistent id: - - requires: - cluster_features: "tsdb.ts_routing_hash_doc_value_parse_byte_ref" - reason: _tsid routing hash doc value parsing has been fixed - do: bulk: refresh: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index dae50704dd0d0..a8d256bbc097e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -340,9 +340,6 @@ sort by tsid: --- aggs by index_mode: - - requires: - cluster_features: ["mapper.query_index_mode"] - reason: require _index_mode metadata field - do: search: index: test diff --git 
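# Editor's note: the YAML hunks in this stretch all apply one cleanup: `cluster_features`
# gates (and their reasons) that every supported node now satisfies are dropped from the
# `requires` blocks, while unrelated `test_runner_features` entries stay behind. A
# hypothetical before/after following that pattern ("mapper.example_feature" is an
# invented id, not a real feature):
#
#   before:
#     - requires:
#         cluster_features: ["mapper.example_feature"]
#         reason: feature introduced in an older, now-minimum version
#         test_runner_features: close_to
#
#   after:
#     - requires:
#         test_runner_features: close_to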
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index ea7cec710e31e..e46a0e2ab65ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -71,7 +71,7 @@ public void testAutoCreatePrimaryIndex() throws Exception { CreateIndexRequest request = new CreateIndexRequest(PRIMARY_INDEX_NAME); client().execute(AutoCreateAction.INSTANCE, request).get(); - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(PRIMARY_INDEX_NAME).get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(PRIMARY_INDEX_NAME).get(); assertThat(response.indices().length, is(1)); assertThat(response.aliases().size(), is(1)); assertThat(response.aliases().get(PRIMARY_INDEX_NAME).size(), is(1)); @@ -85,7 +85,7 @@ public void testAutoCreatePrimaryIndexFromAlias() throws Exception { CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME); client().execute(AutoCreateAction.INSTANCE, request).get(); - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(PRIMARY_INDEX_NAME).get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(PRIMARY_INDEX_NAME).get(); assertThat(response.indices().length, is(1)); assertThat(response.aliases().size(), is(1)); assertThat(response.aliases().get(PRIMARY_INDEX_NAME).size(), is(1)); @@ -99,7 +99,7 @@ public void testAutoCreateNonPrimaryIndex() throws Exception { CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME + "-2"); client().execute(AutoCreateAction.INSTANCE, request).get(); - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(INDEX_NAME + "-2").get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(INDEX_NAME + "-2").get(); assertThat(response.indices().length, is(1)); assertThat(response.aliases().size(), is(1)); assertThat(response.aliases().get(INDEX_NAME + "-2").size(), is(1)); @@ -144,7 +144,9 @@ public void testSystemIndicesAutoCreatedAsHidden() throws Exception { CreateIndexRequest request = new CreateIndexRequest(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME); client().execute(AutoCreateAction.INSTANCE, request).get(); - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME).get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME) + .get(); assertThat(response.indices().length, is(1)); Settings settings = response.settings().get(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME); assertThat(settings, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index 3945c3a48f0af..ddd3c8e53773f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -46,7 +46,7 @@ protected void setupSuiteScopeCluster() throws Exception { } public void 
testSimple() { - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("idx").get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx").get(); String[] indices = response.indices(); assertThat(indices, notNullValue()); assertThat(indices.length, equalTo(1)); @@ -58,7 +58,7 @@ public void testSimple() { public void testSimpleUnknownIndex() { try { - indicesAdmin().prepareGetIndex().addIndices("missing_idx").get(); + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("missing_idx").get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index [missing_idx]")); @@ -66,7 +66,7 @@ public void testSimpleUnknownIndex() { } public void testUnknownIndexWithAllowNoIndices() { - GetIndexResponse response = indicesAdmin().prepareGetIndex() + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) .addIndices("missing_idx") .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) .get(); @@ -77,7 +77,7 @@ public void testUnknownIndexWithAllowNoIndices() { } public void testEmpty() { - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("empty_idx").get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("empty_idx").get(); String[] indices = response.indices(); assertThat(indices, notNullValue()); assertThat(indices.length, equalTo(1)); @@ -88,7 +88,10 @@ public void testEmpty() { } public void testSimpleMapping() { - GetIndexResponse response = runWithRandomFeatureMethod(indicesAdmin().prepareGetIndex().addIndices("idx"), Feature.MAPPINGS); + GetIndexResponse response = runWithRandomFeatureMethod( + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"), + Feature.MAPPINGS + ); String[] indices = response.indices(); assertThat(indices, notNullValue()); assertThat(indices.length, equalTo(1)); @@ -99,7 +102,10 @@ public void testSimpleMapping() { } public void testSimpleAlias() { - GetIndexResponse response = runWithRandomFeatureMethod(indicesAdmin().prepareGetIndex().addIndices("idx"), Feature.ALIASES); + GetIndexResponse response = runWithRandomFeatureMethod( + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"), + Feature.ALIASES + ); String[] indices = response.indices(); assertThat(indices, notNullValue()); assertThat(indices.length, equalTo(1)); @@ -110,7 +116,10 @@ public void testSimpleAlias() { } public void testSimpleSettings() { - GetIndexResponse response = runWithRandomFeatureMethod(indicesAdmin().prepareGetIndex().addIndices("idx"), Feature.SETTINGS); + GetIndexResponse response = runWithRandomFeatureMethod( + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"), + Feature.SETTINGS + ); String[] indices = response.indices(); assertThat(indices, notNullValue()); assertThat(indices.length, equalTo(1)); @@ -127,7 +136,7 @@ public void testSimpleMixedFeatures() { features.add(randomFrom(Feature.values())); } GetIndexResponse response = runWithRandomFeatureMethod( - indicesAdmin().prepareGetIndex().addIndices("idx"), + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"), features.toArray(new Feature[features.size()]) ); String[] indices = response.indices(); @@ -158,7 +167,7 @@ public void testEmptyMixedFeatures() { features.add(randomFrom(Feature.values())); } GetIndexResponse response = runWithRandomFeatureMethod( - indicesAdmin().prepareGetIndex().addIndices("empty_idx"), + 
indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("empty_idx"), features.toArray(new Feature[features.size()]) ); String[] indices = response.indices(); @@ -182,7 +191,7 @@ public void testGetIndexWithBlocks() { for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("idx", block); - GetIndexResponse response = indicesAdmin().prepareGetIndex() + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) .addIndices("idx") .addFeatures(Feature.MAPPINGS, Feature.ALIASES) .get(); @@ -200,7 +209,7 @@ public void testGetIndexWithBlocks() { try { enableIndexBlock("idx", SETTING_BLOCKS_METADATA); assertBlocked( - indicesAdmin().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), INDEX_METADATA_BLOCK ); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 746f35992e721..a7cb39ed3df9e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -805,7 +805,7 @@ public void testMultiThreadedRollover() throws Exception { assertBusy(() -> { try { - indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000002").get(); + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(writeIndexPrefix + "000002").get(); } catch (Exception e) { logger.info("--> expecting second index to be created but it has not yet been created"); fail("expecting second index to exist"); @@ -824,7 +824,7 @@ public void testMultiThreadedRollover() throws Exception { }); // We should *NOT* have a third index, it should have rolled over *exactly* once - expectThrows(Exception.class, indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000003")); + expectThrows(Exception.class, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(writeIndexPrefix + "000003")); } public void testRolloverConcurrently() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index 6c7754932af68..f06810377771b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -33,8 +34,10 @@ import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.SourceFieldMapper; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; @@ -932,6 +935,102 @@ public void testGetRemoteIndex() { ); } + public void testRealTimeGetNestedFields() { + String index = "test"; + SourceFieldMapper.Mode sourceMode = randomFrom(SourceFieldMapper.Mode.values()); + assertAcked( + prepareCreate(index).setMapping("title", "type=keyword", "author", "type=nested") + .setSettings( + indexSettings(1, 0).put("index.refresh_interval", -1) + .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), sourceMode) + ) + ); + ensureGreen(); + String source0 = """ + { + "title": "t0", + "author": [ + { + "name": "a0" + } + ] + } + """; + prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("0").setSource(source0, XContentType.JSON).get(); + // start tracking translog locations + assertTrue(client().prepareGet(index, "0").setRealtime(true).get().isExists()); + String source1 = """ + { + "title": ["t1"], + "author": [ + { + "name": "a1" + } + ] + } + """; + prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("1").setSource(source1, XContentType.JSON).get(); + String source2 = """ + { + "title": ["t1", "t2"], + "author": [ + { + "name": "a1" + }, + { + "name": "a2" + } + ] + } + """; + prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("2").setSource(source2, XContentType.JSON).get(); + String source3 = """ + { + "title": ["t1", "t3", "t2"] + } + """; + prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("3").setSource(source3, XContentType.JSON).get(); + GetResponse translog1 = client().prepareGet(index, "1").setRealtime(true).get(); + GetResponse translog2 = client().prepareGet(index, "2").setRealtime(true).get(); + GetResponse translog3 = client().prepareGet(index, "3").setRealtime(true).get(); + assertTrue(translog1.isExists()); + assertTrue(translog2.isExists()); + assertTrue(translog3.isExists()); + switch (sourceMode) { + case STORED -> { + assertThat(translog1.getSourceAsBytesRef().utf8ToString(), equalTo(source1)); + assertThat(translog2.getSourceAsBytesRef().utf8ToString(), equalTo(source2)); + assertThat(translog3.getSourceAsBytesRef().utf8ToString(), equalTo(source3)); + } + case SYNTHETIC -> { + assertThat(translog1.getSourceAsBytesRef().utf8ToString(), equalTo(""" + {"author":{"name":"a1"},"title":"t1"}""")); + assertThat(translog2.getSourceAsBytesRef().utf8ToString(), equalTo(""" + {"author":[{"name":"a1"},{"name":"a2"}],"title":["t1","t2"]}""")); + assertThat(translog3.getSourceAsBytesRef().utf8ToString(), equalTo(""" + {"title":["t1","t2","t3"]}""")); + } + case DISABLED -> { + assertNull(translog1.getSourceAsBytesRef()); + assertNull(translog2.getSourceAsBytesRef()); + assertNull(translog3.getSourceAsBytesRef()); + } + } + assertFalse(client().prepareGet(index, "1").setRealtime(false).get().isExists()); + assertFalse(client().prepareGet(index, "2").setRealtime(false).get().isExists()); + assertFalse(client().prepareGet(index, "3").setRealtime(false).get().isExists()); + refresh(index); + GetResponse lucene1 = client().prepareGet(index, "1").setRealtime(randomBoolean()).get(); + GetResponse lucene2 = client().prepareGet(index, "2").setRealtime(randomBoolean()).get(); + GetResponse lucene3 = client().prepareGet(index, "3").setRealtime(randomBoolean()).get(); + assertTrue(lucene1.isExists()); + assertTrue(lucene2.isExists()); + assertTrue(lucene3.isExists()); + assertThat(translog1.getSourceAsBytesRef(), 
equalTo(lucene1.getSourceAsBytesRef())); + assertThat(translog2.getSourceAsBytesRef(), equalTo(lucene2.getSourceAsBytesRef())); + assertThat(translog3.getSourceAsBytesRef(), equalTo(lucene3.getSourceAsBytesRef())); + } + private void assertGetFieldsAlwaysWorks(String index, String docId, String[] fields) { assertGetFieldsAlwaysWorks(index, docId, fields, null); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java index 92e5eb8e046bc..88cca3308ac4f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java @@ -54,7 +54,7 @@ public void testCanRecoverFromStoreWithoutPeerRecoveryRetentionLease() throws Ex ensureGreen(INDEX_NAME); IndicesService service = internalCluster().getInstance(IndicesService.class, dataNode); - String uuid = indicesAdmin().getIndex(new GetIndexRequest().indices(INDEX_NAME)) + String uuid = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME)) .actionGet() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); Path path = service.indexService(new Index(INDEX_NAME, uuid)).getShard(0).shardPath().getShardStatePath(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index c3eda84ee9e5e..b1fd483cb9be6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -87,16 +87,25 @@ public void testResetSystemIndices() throws Exception { ); // verify that both indices are gone - Exception e1 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex1)); + Exception e1 = expectThrows( + IndexNotFoundException.class, + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(systemIndex1) + ); assertThat(e1.getMessage(), containsString("no such index")); - Exception e2 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(associatedIndex)); + Exception e2 = expectThrows( + IndexNotFoundException.class, + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(associatedIndex) + ); assertThat(e2.getMessage(), containsString("no such index")); - Exception e3 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex2)); + Exception e3 = expectThrows( + IndexNotFoundException.class, + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(systemIndex2) + ); assertThat(e3.getMessage(), containsString("no such index")); - GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("my_index").get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("my_index").get(); assertThat(response.getIndices(), arrayContaining("my_index")); } diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 4112290fa4e04..2a68b65bcdccb 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -424,22 +424,14 @@ provides 
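The realtime-get test above encodes two properties of synthetic source that are easy to miss: multi-valued fields come back sorted (`["t1", "t3", "t2"]` is read back as `["t1","t2","t3"]`) and single-element arrays collapse to scalars. A self-contained sketch of just that normalization, purely for illustration:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Illustration only, not Elasticsearch code: synthetic source reconstructs a
// normalized form of the document, so multi-valued fields come back sorted,
// single-element arrays collapse to scalars, and keys come back ordered.
class SyntheticSourceSketch {
    static Object normalizeField(List<String> values) {
        if (values.size() == 1) {
            return values.get(0);      // ["t1"] is read back as "t1"
        }
        List<String> sorted = new ArrayList<>(values);
        sorted.sort(null);             // natural order: ["t1","t3","t2"] -> [t1, t2, t3]
        return sorted;
    }

    public static void main(String[] args) {
        TreeMap<String, Object> doc = new TreeMap<>();
        doc.put("title", normalizeField(List.of("t1", "t3", "t2")));
        doc.put("author", normalizeField(List.of("a1")));
        System.out.println(doc); // {author=a1, title=[t1, t2, t3]}
    }
}
```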
org.elasticsearch.features.FeatureSpecification with - org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures, org.elasticsearch.action.bulk.BulkFeatures, org.elasticsearch.features.FeatureInfrastructureFeatures, - org.elasticsearch.health.HealthFeatures, - org.elasticsearch.cluster.metadata.MetadataFeatures, - org.elasticsearch.rest.RestFeatures, - org.elasticsearch.repositories.RepositoriesFeatures, - org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures, org.elasticsearch.index.mapper.MapperFeatures, org.elasticsearch.index.IndexFeatures, - org.elasticsearch.ingest.IngestGeoIpFeatures, org.elasticsearch.search.SearchFeatures, org.elasticsearch.script.ScriptFeatures, org.elasticsearch.search.retriever.RetrieversFeatures, - org.elasticsearch.reservedstate.service.FileSettingsFeatures, org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index a430611559bb4..56083902c3cc2 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1824,7 +1824,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.indices.recovery.PeerRecoveryNotFound.class, org.elasticsearch.indices.recovery.PeerRecoveryNotFound::new, 158, - TransportVersions.V_7_9_0 + UNKNOWN_VERSION_ADDED ), NODE_HEALTH_CHECK_FAILURE_EXCEPTION( org.elasticsearch.cluster.coordination.NodeHealthCheckFailureException.class, @@ -1836,7 +1836,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.transport.NoSeedNodeLeftException.class, org.elasticsearch.transport.NoSeedNodeLeftException::new, 160, - TransportVersions.V_7_10_0 + UNKNOWN_VERSION_ADDED ), AUTHENTICATION_PROCESSING_ERROR( org.elasticsearch.ElasticsearchAuthenticationProcessingError.class, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index f0f3d27c6e86c..1ab8cdfc2af76 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -62,7 +62,6 @@ static TransportVersion def(int id) { public static final TransportVersion V_7_8_1 = def(7_08_01_99); public static final TransportVersion V_7_9_0 = def(7_09_00_99); public static final TransportVersion V_7_10_0 = def(7_10_00_99); - public static final TransportVersion V_7_10_1 = def(7_10_01_99); public static final TransportVersion V_7_11_0 = def(7_11_00_99); public static final TransportVersion V_7_12_0 = def(7_12_00_99); public static final TransportVersion V_7_13_0 = def(7_13_00_99); @@ -156,6 +155,7 @@ static TransportVersion def(int id) { public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0); public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0); public static final TransportVersion KQL_QUERY_TECH_PREVIEW = def(8_823_00_0); + public static final TransportVersion ESQL_PROFILE_ROWS_PROCESSED = def(8_824_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 98d6284fd91d2..ec393b7af5cdf 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -1004,7 +1004,7 @@ public void initRestHandlers(Supplier nodesInCluster, Predicate< // Desired nodes registerHandler.accept(new RestGetDesiredNodesAction()); - registerHandler.accept(new RestUpdateDesiredNodesAction(clusterSupportsFeature)); + registerHandler.accept(new RestUpdateDesiredNodesAction()); registerHandler.accept(new RestDeleteDesiredNodesAction()); for (ActionPlugin plugin : actionPlugins) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java deleted file mode 100644 index 164fc816ad367..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class AllocationStatsFeatures implements FeatureSpecification { - public static final NodeFeature INCLUDE_DISK_THRESHOLD_SETTINGS = new NodeFeature("stats.include_disk_thresholds", true); - - @Override - public Set getFeatures() { - return Set.of(INCLUDE_DISK_THRESHOLD_SETTINGS); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java index d929fb457d5d1..23bf22e08985e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -49,7 +48,6 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc private final AllocationStatsService allocationStatsService; private final DiskThresholdSettings diskThresholdSettings; - private final FeatureService featureService; @Inject public TransportGetAllocationStatsAction( @@ -58,8 +56,7 @@ public TransportGetAllocationStatsAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - AllocationStatsService allocationStatsService, - FeatureService featureService + 
AllocationStatsService allocationStatsService ) { super( TYPE.name(), @@ -74,7 +71,6 @@ public TransportGetAllocationStatsAction( ); this.allocationStatsService = allocationStatsService; this.diskThresholdSettings = new DiskThresholdSettings(clusterService.getSettings(), clusterService.getClusterSettings()); - this.featureService = featureService; } @Override @@ -92,10 +88,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A listener.onResponse( new Response( request.metrics().contains(Metric.ALLOCATIONS) ? allocationStatsService.stats() : Map.of(), - request.metrics().contains(Metric.FS) - && featureService.clusterHasFeature(clusterService.state(), AllocationStatsFeatures.INCLUDE_DISK_THRESHOLD_SETTINGS) - ? diskThresholdSettings - : null + request.metrics().contains(Metric.FS) ? diskThresholdSettings : null ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java index 7d70e83f6558c..6ff5d347ab058 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java @@ -41,4 +41,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(supported); } + + @Override + public String toString() { + return "NodeCapability{supported=" + supported + '}'; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java index 9fede2ebb5be6..beb0e1f927de2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.admin.cluster.node.capabilities; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -20,11 +19,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; @@ -32,7 +29,6 @@ import java.io.IOException; import java.util.List; -import java.util.Optional; import java.util.Set; public class TransportNodesCapabilitiesAction extends TransportNodesAction< @@ -45,7 +41,6 @@ public class TransportNodesCapabilitiesAction extends TransportNodesAction< public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/capabilities"); private final RestController restController; - private final FeatureService featureService; @Inject public TransportNodesCapabilitiesAction( @@ -53,8 +48,7 @@ public TransportNodesCapabilitiesAction( ClusterService clusterService, TransportService 
transportService, ActionFilters actionFilters, - RestController restController, - FeatureService featureService + RestController restController ) { super( TYPE.name(), @@ -65,23 +59,6 @@ public TransportNodesCapabilitiesAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.restController = restController; - this.featureService = featureService; - } - - @Override - protected void doExecute(Task task, NodesCapabilitiesRequest request, ActionListener listener) { - if (featureService.clusterHasFeature(clusterService.state(), RestNodesCapabilitiesAction.CAPABILITIES_ACTION) == false) { - // not everything in the cluster supports capabilities. - // Therefore we don't support whatever it is we're being asked for - listener.onResponse(new NodesCapabilitiesResponse(clusterService.getClusterName(), List.of(), List.of()) { - @Override - public Optional isSupported() { - return Optional.of(false); - } - }); - } else { - super.doExecute(task, request, listener); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index a7d92682b763c..08825706c09ef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -111,9 +111,7 @@ public NodeInfo(StreamInput in) throws IOException { addInfoIfNonNull(HttpInfo.class, in.readOptionalWriteable(HttpInfo::new)); addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new)); addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new)); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); - } + addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { addInfoIfNonNull(RemoteClusterServerInfo.class, in.readOptionalWriteable(RemoteClusterServerInfo::new)); } @@ -285,9 +283,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(getInfo(HttpInfo.class)); out.writeOptionalWriteable(getInfo(PluginsAndModules.class)); out.writeOptionalWriteable(getInfo(IngestInfo.class)); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeOptionalWriteable(getInfo(AggregationInfo.class)); - } + out.writeOptionalWriteable(getInfo(AggregationInfo.class)); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalWriteable(getInfo(RemoteClusterServerInfo.class)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index be7aaeec8f69e..05c44b55cf8bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -93,8 +94,8 @@ public static Feature[] 
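The `NodeInfo` hunk below this point can drop the `onOrAfter(V_7_10_0)` guard because a guard below the minimum supported wire version is always true, while the `V_8_8_0` guard must stay. A minimal sketch of version-gated serialization, with a plain `int` and `DataOutputStream` standing in for `TransportVersion` and `StreamOutput`:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of version-gated wire writes; the id mirrors the 7_10_00_99-style
// scheme used in TransportVersions but is purely illustrative.
final class VersionGatedWriter {
    static final int V_8_8_0 = 8_08_00_99; // illustrative id

    private final DataOutputStream out;
    private final int wireVersion; // version negotiated with the peer

    VersionGatedWriter(DataOutputStream out, int wireVersion) {
        this.out = out;
        this.wireVersion = wireVersion;
    }

    void writeNodeInfo(String ingestInfo, String remoteClusterServerInfo) throws IOException {
        out.writeUTF(ingestInfo);        // always present on the wire
        if (wireVersion >= V_8_8_0) {    // only peers that understand the field get it
            out.writeUTF(remoteClusterServerInfo);
        }
        // A guard below the minimum supported wire version always evaluates to
        // true, which is why such checks can simply be deleted.
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new VersionGatedWriter(new DataOutputStream(bytes), V_8_8_0).writeNodeInfo("ingest", "remote");
        System.out.println(bytes.size() + " bytes written");
    }
}
```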
fromRequest(RestRequest request) { private boolean humanReadable = false; private transient boolean includeDefaults = false; - public GetIndexRequest() { - super(IndicesOptions.strictExpandOpen()); + public GetIndexRequest(TimeValue masterTimeout) { + super(masterTimeout, IndicesOptions.strictExpandOpen()); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java index 51088124b084d..18abb9e5c58e1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -12,11 +12,12 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder { - public GetIndexRequestBuilder(ElasticsearchClient client, String... indices) { - super(client, GetIndexAction.INSTANCE, new GetIndexRequest().indices(indices)); + public GetIndexRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... indices) { + super(client, GetIndexAction.INSTANCE, new GetIndexRequest(masterTimeout).indices(indices)); } public GetIndexRequestBuilder setFeatures(Feature... features) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index be08293fe90db..c845d1a3854c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -55,8 +54,6 @@ public final class LazyRolloverAction extends ActionType { private static final Logger logger = LogManager.getLogger(LazyRolloverAction.class); - public static final NodeFeature DATA_STREAM_LAZY_ROLLOVER = new NodeFeature("data_stream.rollover.lazy", true); - public static final LazyRolloverAction INSTANCE = new LazyRolloverAction(); public static final String NAME = "indices:admin/data_stream/lazy_rollover"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java index 67b6df150c458..6106e620521f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import java.util.ArrayList; import java.util.HashMap; @@ -22,9 +21,6 @@ public class IndexStats implements Iterable { - // feature was 
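`GetIndexRequest` now takes the master timeout as a constructor argument, which is why every test call site above gained `TEST_REQUEST_TIMEOUT`. A self-contained sketch of that make-it-a-constructor-parameter pattern, with hypothetical names (`GetThingRequest`, and `Duration` in place of `TimeValue`):

```java
import java.time.Duration;
import java.util.Objects;

// Hypothetical stand-in types for the pattern: make the timeout a constructor
// argument so no call site can omit it.
final class GetThingRequest {
    private final Duration masterTimeout;
    private String[] indices = new String[0];

    GetThingRequest(Duration masterTimeout) {
        this.masterTimeout = Objects.requireNonNull(masterTimeout);
    }

    GetThingRequest indices(String... indices) {
        this.indices = indices;
        return this;
    }

    @Override
    public String toString() {
        return "GetThingRequest{timeout=" + masterTimeout + ", indices=" + indices.length + "}";
    }
}

final class TimeoutDemo {
    public static void main(String[] args) {
        // A no-argument constructor no longer exists, so the timeout is always explicit.
        System.out.println(new GetThingRequest(Duration.ofSeconds(30)).indices("idx"));
    }
}
```

Moving the timeout from a setter to the constructor turns a forgotten default into a compile error, which is the point of a change that touches this many call sites.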
effectively reverted but we still need to keep this constant around - public static final NodeFeature REVERTED_TIER_CREATION_DATE = new NodeFeature("stats.tier_creation_date", true); - private final String index; private final String uuid; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java deleted file mode 100644 index 558343db1023a..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action.admin.indices.stats; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class IndicesStatsFeatures implements FeatureSpecification { - - @Override - public Set getFeatures() { - return Set.of(IndexStats.REVERTED_TIER_CREATION_DATE); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java index 998a3ada5d157..5851549977eab 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java @@ -14,24 +14,10 @@ import java.util.Set; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_IGNORED_FIELDS; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING; public class BulkFeatures implements FeatureSpecification { public Set getFeatures() { - return Set.of( - SIMULATE_MAPPING_VALIDATION, - SIMULATE_MAPPING_VALIDATION_TEMPLATES, - SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS, - SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS, - SIMULATE_MAPPING_ADDITION, - SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING, - SIMULATE_IGNORED_FIELDS - ); + return Set.of(SIMULATE_IGNORED_FIELDS); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 2a6a789d9d312..523381321ada7 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -44,7 +44,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import 
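The deleted `IndicesStatsFeatures` and `AllocationStatsFeatures` classes were `FeatureSpecification` providers, which is why the `provides ... with` list in module-info shrinks in step. A bare-bones sketch of that `ServiceLoader` wiring, assuming a hypothetical `FeatureSpec` interface:

```java
import java.util.ServiceLoader;
import java.util.Set;

// Sketch of the SPI behind `provides ... with`: implementations are discovered
// via ServiceLoader, so deleting a provider class also means removing it from
// module-info. FeatureSpec is a hypothetical interface.
interface FeatureSpec {
    Set<String> features();
}

final class FeatureRegistry {
    public static void main(String[] args) {
        for (FeatureSpec spec : ServiceLoader.load(FeatureSpec.class)) {
            System.out.println(spec.features());
        }
        System.out.println("done (no providers registered in this sketch)");
    }
}
```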
org.elasticsearch.core.Nullable; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.indices.SystemIndices; @@ -80,7 +79,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private static final Logger logger = LogManager.getLogger(TransportBulkAction.class); public static final String LAZY_ROLLOVER_ORIGIN = "lazy_rollover"; - private final FeatureService featureService; private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; @@ -93,7 +91,6 @@ public TransportBulkAction( TransportService transportService, ClusterService clusterService, IngestService ingestService, - FeatureService featureService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, @@ -107,7 +104,6 @@ public TransportBulkAction( transportService, clusterService, ingestService, - featureService, client, actionFilters, indexNameExpressionResolver, @@ -124,7 +120,6 @@ public TransportBulkAction( TransportService transportService, ClusterService clusterService, IngestService ingestService, - FeatureService featureService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, @@ -141,7 +136,6 @@ public TransportBulkAction( transportService, clusterService, ingestService, - featureService, client, actionFilters, indexNameExpressionResolver, @@ -160,7 +154,6 @@ public TransportBulkAction( TransportService transportService, ClusterService clusterService, IngestService ingestService, - FeatureService featureService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, @@ -184,7 +177,6 @@ public TransportBulkAction( ); this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; Objects.requireNonNull(relativeTimeProvider); - this.featureService = featureService; this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); @@ -283,7 +275,6 @@ private void populateMissingTargets( // A map for memorizing which indices exist. Map indexExistence = new HashMap<>(); Function indexExistenceComputation = (index) -> indexNameExpressionResolver.hasIndexAbstraction(index, state); - boolean lazyRolloverFeature = featureService.clusterHasFeature(state, LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER); boolean lazyRolloverFailureStoreFeature = DataStream.isFailureStoreFeatureFlagEnabled(); Set indicesThatRequireAlias = new HashSet<>(); @@ -328,18 +319,15 @@ private void populateMissingTargets( } } // Determine which data streams and failure stores need to be rolled over. 
- if (lazyRolloverFeature) { - DataStream dataStream = state.metadata().dataStreams().get(request.index()); - if (dataStream != null) { - if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) { - dataStreamsToBeRolledOver.add(request.index()); - } else if (lazyRolloverFailureStoreFeature - && writeToFailureStore - && dataStream.getFailureIndices().isRolloverOnWrite()) { - failureStoresToBeRolledOver.add(request.index()); - } + DataStream dataStream = state.metadata().dataStreams().get(request.index()); + if (dataStream != null) { + if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) { + dataStreamsToBeRolledOver.add(request.index()); + } else if (lazyRolloverFailureStoreFeature && writeToFailureStore && dataStream.getFailureIndices().isRolloverOnWrite()) { + failureStoresToBeRolledOver.add(request.index()); } } + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 2d65bea4ac5c2..18c420d99f525 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -83,15 +83,6 @@ * shards are not actually modified). */ public class TransportSimulateBulkAction extends TransportAbstractBulkAction { - public static final NodeFeature SIMULATE_MAPPING_VALIDATION = new NodeFeature("simulate.mapping.validation", true); - public static final NodeFeature SIMULATE_MAPPING_VALIDATION_TEMPLATES = new NodeFeature("simulate.mapping.validation.templates", true); - public static final NodeFeature SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS = new NodeFeature( - "simulate.component.template.substitutions", - true - ); - public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions", true); - public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition", true); - public static final NodeFeature SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING = new NodeFeature("simulate.support.non.template.mapping", true); public static final NodeFeature SIMULATE_IGNORED_FIELDS = new NodeFeature("simulate.ignored.fields"); private final IndicesService indicesService; diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java index 578b0a61aafde..2eed45e5afa6d 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.Index; import java.util.List; @@ -43,8 +41,6 @@ public class DataStreamAutoShardingService { private static final Logger logger = LogManager.getLogger(DataStreamAutoShardingService.class); public static final String DATA_STREAMS_AUTO_SHARDING_ENABLED = "data_streams.auto_sharding.enabled"; - public static final NodeFeature DATA_STREAM_AUTO_SHARDING_FEATURE = new 
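With the cluster-wide feature check gone, the lazy-rollover decision above depends only on the data stream's own state. Restated as a small pure function over hypothetical stand-in types (`DataStreamState` is not the real metadata class):

```java
// Stand-in for the rollover-on-write flags read from data stream metadata.
record DataStreamState(boolean backingRolloverOnWrite, boolean failureRolloverOnWrite) {}

enum RolloverTarget { NONE, BACKING_INDICES, FAILURE_STORE }

final class LazyRolloverDecision {
    static RolloverTarget decide(DataStreamState ds, boolean writeToFailureStore, boolean failureStoreFeatureEnabled) {
        if (writeToFailureStore == false && ds.backingRolloverOnWrite()) {
            return RolloverTarget.BACKING_INDICES;
        }
        if (failureStoreFeatureEnabled && writeToFailureStore && ds.failureRolloverOnWrite()) {
            return RolloverTarget.FAILURE_STORE;
        }
        return RolloverTarget.NONE;
    }

    public static void main(String[] args) {
        System.out.println(decide(new DataStreamState(true, false), false, true)); // BACKING_INDICES
    }
}
```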
NodeFeature("data_stream.auto_sharding", true); - public static final Setting> DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING = Setting.listSetting( "data_streams.auto_sharding.excludes", List.of(), @@ -101,7 +97,6 @@ public class DataStreamAutoShardingService { ); private final ClusterService clusterService; private final boolean isAutoShardingEnabled; - private final FeatureService featureService; private final LongSupplier nowSupplier; private volatile TimeValue increaseShardsCooldown; private volatile TimeValue reduceShardsCooldown; @@ -109,12 +104,7 @@ public class DataStreamAutoShardingService { private volatile int maxWriteThreads; private volatile List dataStreamExcludePatterns; - public DataStreamAutoShardingService( - Settings settings, - ClusterService clusterService, - FeatureService featureService, - LongSupplier nowSupplier - ) { + public DataStreamAutoShardingService(Settings settings, ClusterService clusterService, LongSupplier nowSupplier) { this.clusterService = clusterService; this.isAutoShardingEnabled = settings.getAsBoolean(DATA_STREAMS_AUTO_SHARDING_ENABLED, false); this.increaseShardsCooldown = DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.get(settings); @@ -122,7 +112,6 @@ public DataStreamAutoShardingService( this.minWriteThreads = CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS.get(settings); this.maxWriteThreads = CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS.get(settings); this.dataStreamExcludePatterns = DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.get(settings); - this.featureService = featureService; this.nowSupplier = nowSupplier; } @@ -168,15 +157,6 @@ public AutoShardingResult calculate(ClusterState state, DataStream dataStream, @ return NOT_APPLICABLE_RESULT; } - if (featureService.clusterHasFeature(state, DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE) == false) { - logger.debug( - "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] because the cluster " - + "doesn't have the auto sharding feature", - dataStream.getName() - ); - return NOT_APPLICABLE_RESULT; - } - if (dataStreamExcludePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, dataStream.getName()))) { logger.debug( "Data stream [{}] is excluded from auto sharding via the [{}] setting", diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 80ffd305bad52..73e6a0306247d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -231,7 +231,7 @@ public final void start() { } @Override - public final void run() { + protected final void run() { for (final SearchShardIterator iterator : toSkipShardsIts) { assert iterator.skip(); skipShard(iterator); @@ -286,7 +286,7 @@ private static boolean assertExecuteOnStartThread() { return true; } - protected void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { + private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { if (throttleConcurrentRequests) { var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent( shard.getNodeId(), @@ -349,7 +349,7 @@ protected abstract void executePhaseOnShard( * of the next phase. 
If there are no successful operations in the context when this method is executed the search is aborted and * a response is returned to the user indicating that all shards have failed. */ - protected void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier) { + protected void executeNextPhase(String currentPhase, Supplier nextPhaseSupplier) { /* This is the main search phase transition where we move to the next phase. If all shards * failed or if there was a failure and partial results are not allowed, then we immediately * fail. Otherwise we continue to the next phase. @@ -360,7 +360,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug(() -> "All shards failed for phase: [" + currentPhase.getName() + "]", cause); + logger.debug(() -> "All shards failed for phase: [" + currentPhase + "]", cause); onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); @@ -373,7 +373,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier int numShardFailures = shardSearchFailures.length; shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures); Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase.getName()), cause); + logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); } else { @@ -386,7 +386,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier successfulOps.get(), skippedOps.get(), getNumShards(), - currentPhase.getName() + currentPhase ); } onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); @@ -400,7 +400,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier .collect(Collectors.joining(",")); logger.trace( "[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})", - currentPhase.getName(), + currentPhase, nextPhase.getName(), resultsFrom, clusterStateVersion @@ -413,11 +413,11 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier private void executePhase(SearchPhase phase) { try { phase.run(); - } catch (Exception e) { + } catch (RuntimeException e) { if (logger.isDebugEnabled()) { logger.debug(() -> format("Failed to execute [%s] while moving to [%s] phase", request, phase.getName()), e); } - onPhaseFailure(phase, "", e); + onPhaseFailure(phase.getName(), "", e); } } @@ -693,8 +693,8 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At * @param msg an optional message * @param cause the cause of the phase failure */ - public void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { - raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); + public void onPhaseFailure(String phase, String msg, Throwable cause) { + raisePhaseFailure(new SearchPhaseExecutionException(phase, msg, cause, buildShardFailures())); } /** @@ -739,7 +739,7 @@ void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connecti * @see #onShardResult(SearchPhaseResult, SearchShardIterator) */ private void onPhaseDone() { // as a tribute to @kimchy aka. 
finishHim() - executeNextPhase(this, this::getNextPhase); + executeNextPhase(getName(), this::getNextPhase); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index cc8c4becea9a9..faeb552530e47 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -39,6 +39,9 @@ * @see CountedCollector#onFailure(int, SearchShardTarget, Exception) */ final class DfsQueryPhase extends SearchPhase { + + public static final String NAME = "dfs_query"; + private final SearchPhaseResults queryResult; private final List searchResults; private final AggregatedDfs dfs; @@ -56,7 +59,7 @@ final class DfsQueryPhase extends SearchPhase { Function, SearchPhase> nextPhaseFactory, AbstractSearchAsyncAction context ) { - super("dfs_query"); + super(NAME); this.progressListener = context.getTask().getProgressListener(); this.queryResult = queryResult; this.searchResults = searchResults; @@ -68,13 +71,13 @@ final class DfsQueryPhase extends SearchPhase { } @Override - public void run() { + protected void run() { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early final CountedCollector counter = new CountedCollector<>( queryResult, searchResults.size(), - () -> context.executeNextPhase(this, () -> nextPhaseFactory.apply(queryResult)), + () -> context.executeNextPhase(NAME, () -> nextPhaseFactory.apply(queryResult)), context ); @@ -106,7 +109,7 @@ protected void innerOnResponse(QuerySearchResult response) { response.setSearchProfileDfsPhaseResult(dfsResult.searchProfileDfsPhaseResult()); counter.onResult(response); } catch (Exception e) { - context.onPhaseFailure(DfsQueryPhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index e8d94c32bdcc7..b0b3f15265920 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -31,12 +31,15 @@ * forwards to the next phase immediately. 
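The phase classes above now expose a `NAME` constant and report failures by that string rather than passing the phase object itself. A compact sketch of the shape of that refactor, with invented `Phase`/`Coordinator` types:

```java
// Invented types sketching the refactor: failures are reported by a NAME
// string constant instead of passing the phase object.
abstract class Phase {
    private final String name;

    Phase(String name) {
        this.name = name;
    }

    final String getName() {
        return name;
    }

    protected abstract void run(); // narrowed from public, as in the diff
}

interface Coordinator {
    void onPhaseFailure(String phase, String msg, Throwable cause);
}

final class ExpandPhase extends Phase {
    static final String NAME = "expand";
    private final Coordinator context;

    ExpandPhase(Coordinator context) {
        super(NAME);
        this.context = context;
    }

    @Override
    protected void run() {
        try {
            expandCollapsedHits();
        } catch (RuntimeException e) {
            context.onPhaseFailure(NAME, "failed to expand hits", e); // the name, not `this`
        }
    }

    private void expandCollapsedHits() {
        throw new UnsupportedOperationException("sketch only");
    }
}

final class PhaseDemo {
    public static void main(String[] args) {
        Coordinator coordinator = (phase, msg, cause) -> System.out.println("phase [" + phase + "] failed: " + msg);
        new ExpandPhase(coordinator).run();
    }
}
```

Reporting by name means static helpers and lambdas no longer need a reference to the phase instance, which in turn lets more of these methods become `private` or `static`.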
*/ final class ExpandSearchPhase extends SearchPhase { + + static final String NAME = "expand"; + private final AbstractSearchAsyncAction context; private final SearchHits searchHits; private final Supplier nextPhase; ExpandSearchPhase(AbstractSearchAsyncAction context, SearchHits searchHits, Supplier nextPhase) { - super("expand"); + super(NAME); this.context = context; this.searchHits = searchHits; this.nextPhase = nextPhase; @@ -51,7 +54,7 @@ private boolean isCollapseRequest() { } @Override - public void run() { + protected void run() { if (isCollapseRequest() == false || searchHits.getHits().length == 0) { onPhaseDone(); } else { @@ -123,7 +126,7 @@ private void doRun() { } private void phaseFailure(Exception ex) { - context.onPhaseFailure(this, "failed to expand hits", ex); + context.onPhaseFailure(NAME, "failed to expand hits", ex); } private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) { @@ -168,6 +171,6 @@ private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilde } private void onPhaseDone() { - context.executeNextPhase(this, nextPhase); + context.executeNextPhase(NAME, nextPhase); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java index d8671bcadf86d..2e98d50196490 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java @@ -33,6 +33,9 @@ * @see org.elasticsearch.index.mapper.LookupRuntimeFieldType */ final class FetchLookupFieldsPhase extends SearchPhase { + + static final String NAME = "fetch_lookup_fields"; + private final AbstractSearchAsyncAction context; private final SearchResponseSections searchResponse; private final AtomicArray queryResults; @@ -42,7 +45,7 @@ final class FetchLookupFieldsPhase extends SearchPhase { SearchResponseSections searchResponse, AtomicArray queryResults ) { - super("fetch_lookup_fields"); + super(NAME); this.context = context; this.searchResponse = searchResponse; this.queryResults = queryResults; @@ -74,7 +77,7 @@ private static List groupLookupFieldsByClusterAlias(SearchHits searchHi } @Override - public void run() { + protected void run() { final List clusters = groupLookupFieldsByClusterAlias(searchResponse.hits); if (clusters.isEmpty()) { context.sendSearchResponse(searchResponse, queryResults); @@ -129,7 +132,7 @@ public void onResponse(MultiSearchResponse items) { } } if (failure != null) { - context.onPhaseFailure(FetchLookupFieldsPhase.this, "failed to fetch lookup fields", failure); + context.onPhaseFailure(NAME, "failed to fetch lookup fields", failure); } else { context.sendSearchResponse(searchResponse, queryResults); } @@ -137,7 +140,7 @@ public void onResponse(MultiSearchResponse items) { @Override public void onFailure(Exception e) { - context.onPhaseFailure(FetchLookupFieldsPhase.this, "failed to fetch lookup fields", e); + context.onPhaseFailure(NAME, "failed to fetch lookup fields", e); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 8568b60916761..119cfcab76105 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -34,6 +34,9 @@ * Then 
it reaches out to all relevant shards to fetch the topN hits. */ final class FetchSearchPhase extends SearchPhase { + + static final String NAME = "fetch"; + private final AtomicArray searchPhaseShardResults; private final BiFunction, SearchPhase> nextPhaseFactory; private final AbstractSearchAsyncAction context; @@ -70,7 +73,7 @@ final class FetchSearchPhase extends SearchPhase { @Nullable SearchPhaseController.ReducedQueryPhase reducedQueryPhase, BiFunction, SearchPhase> nextPhaseFactory ) { - super("fetch"); + super(NAME); if (context.getNumShards() != resultConsumer.getNumShards()) { throw new IllegalStateException( "number of shards must match the length of the query results but doesn't:" @@ -90,7 +93,7 @@ final class FetchSearchPhase extends SearchPhase { } @Override - public void run() { + protected void run() { context.execute(new AbstractRunnable() { @Override @@ -100,7 +103,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception e) { - context.onPhaseFailure(FetchSearchPhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } }); } @@ -222,7 +225,7 @@ public void innerOnResponse(FetchSearchResult result) { progressListener.notifyFetchResult(shardIndex); counter.onResult(result); } catch (Exception e) { - context.onPhaseFailure(FetchSearchPhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } } @@ -269,7 +272,7 @@ private void moveToNextPhase( AtomicArray fetchResultsArr, SearchPhaseController.ReducedQueryPhase reducedQueryPhase ) { - context.executeNextPhase(this, () -> { + context.executeNextPhase(NAME, () -> { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); context.addReleasable(resp); return nextPhaseFactory.apply(resp, searchPhaseShardResults); diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 37d5065fdd031..9a8dd94dcd324 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -29,7 +29,6 @@ import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -67,8 +66,8 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults onPartialMergeFailure; private final int batchReduceSize; - private final List buffer = new ArrayList<>(); - private final List emptyResults = new ArrayList<>(); + private List buffer = new ArrayList<>(); + private List emptyResults = new ArrayList<>(); // the memory that is accounted in the circuit breaker for this consumer private volatile long circuitBreakerBytes; // the memory that is currently used in the buffer @@ -159,32 +158,40 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { if (f != null) { throw f; } - + List buffer; + synchronized (this) { + // final reduce, we're done with the buffer so we just null it out and continue with a local variable to + // save field references. The synchronized block is never contended but needed to have a memory barrier and sync buffer's + // contents with all the previous writers to it + buffer = this.buffer; + buffer = buffer == null ? 
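The fetch phase collects per-shard results through a counted collector: every response or failure counts down, and the last arrival moves the search to the next phase. An illustrative, self-contained version of that idiom (not the real `CountedCollector`):

```java
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the counted-collector idiom: each shard response decrements a
// counter, and the last one triggers the next phase exactly once, regardless
// of arrival order or thread.
final class CountedCollector<R> {
    private final R[] results;            // per-shard slots, indexed by shard
    private final AtomicInteger pending;
    private final Runnable onFinish;

    @SuppressWarnings("unchecked")
    CountedCollector(int numShards, Runnable onFinish) {
        this.results = (R[]) new Object[numShards];
        this.pending = new AtomicInteger(numShards);
        this.onFinish = onFinish;
    }

    void onResult(int shardIndex, R result) {
        results[shardIndex] = result;
        countDown();
    }

    void onFailure(int shardIndex) {
        countDown(); // failures still count down so the phase can complete
    }

    private void countDown() {
        if (pending.decrementAndGet() == 0) {
            onFinish.run();
        }
    }

    public static void main(String[] args) {
        CountedCollector<String> collector = new CountedCollector<>(2, () -> System.out.println("move to next phase"));
        collector.onResult(0, "shard-0");
        collector.onFailure(1);
    }
}
```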
Collections.emptyList() : buffer; + this.buffer = null; + } // ensure consistent ordering - sortBuffer(); + buffer.sort(RESULT_COMPARATOR); final TopDocsStats topDocsStats = this.topDocsStats; + var mergeResult = this.mergeResult; + this.mergeResult = null; final int resultSize = buffer.size() + (mergeResult == null ? 0 : 1); final List topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null; final List> aggsList = hasAggs ? new ArrayList<>(resultSize) : null; - synchronized (this) { - if (mergeResult != null) { - if (topDocsList != null) { - topDocsList.add(mergeResult.reducedTopDocs); - } - if (aggsList != null) { - aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); - } + if (mergeResult != null) { + if (topDocsList != null) { + topDocsList.add(mergeResult.reducedTopDocs); } - for (QuerySearchResult result : buffer) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - if (topDocsList != null) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - if (aggsList != null) { - aggsList.add(result.getAggs()); - } + if (aggsList != null) { + aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); + } + } + for (QuerySearchResult result : buffer) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); + } + if (aggsList != null) { + aggsList.add(result.getAggs()); } } SearchPhaseController.ReducedQueryPhase reducePhase; @@ -206,7 +213,7 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { performFinalReduce ); } finally { - releaseAggs(); + releaseAggs(buffer); } if (hasAggs // reduced aggregations can be null if all shards failed @@ -226,25 +233,25 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { ); } return reducePhase; + } private static final Comparator RESULT_COMPARATOR = Comparator.comparingInt(QuerySearchResult::getShardIndex); private MergeResult partialReduce( - QuerySearchResult[] toConsume, - List emptyResults, + List toConsume, + List processedShards, TopDocsStats topDocsStats, MergeResult lastMerge, int numReducePhases ) { // ensure consistent ordering - Arrays.sort(toConsume, RESULT_COMPARATOR); + toConsume.sort(RESULT_COMPARATOR); - final List processedShards = new ArrayList<>(emptyResults); final TopDocs newTopDocs; final InternalAggregations newAggs; final List> aggsList; - final int resultSetSize = toConsume.length + (lastMerge != null ? 1 : 0); + final int resultSetSize = toConsume.size() + (lastMerge != null ? 1 : 0); if (hasAggs) { aggsList = new ArrayList<>(resultSetSize); if (lastMerge != null) { @@ -307,12 +314,6 @@ private boolean hasPendingMerges() { return queue.isEmpty() == false || runningTask.get() != null; } - void sortBuffer() { - if (buffer.size() > 0) { - buffer.sort(RESULT_COMPARATOR); - } - } - private synchronized void addWithoutBreaking(long size) { circuitBreaker.addWithoutBreaking(size); circuitBreakerBytes += size; @@ -376,21 +377,21 @@ private void consume(QuerySearchResult result, Runnable next) { } } if (hasFailure == false) { + var b = buffer; aggsCurrentBufferSize += aggsSize; // add one if a partial merge is pending - int size = buffer.size() + (hasPartialReduce ? 
1 : 0); + int size = b.size() + (hasPartialReduce ? 1 : 0); if (size >= batchReduceSize) { hasPartialReduce = true; executeNextImmediately = false; - QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); - MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); + MergeTask task = new MergeTask(b, aggsCurrentBufferSize, emptyResults, next); + b = buffer = new ArrayList<>(); + emptyResults = new ArrayList<>(); aggsCurrentBufferSize = 0; - buffer.clear(); - emptyResults.clear(); queue.add(task); tryExecuteNext(); } - buffer.add(result); + b.add(result); } } } @@ -404,10 +405,13 @@ private void consume(QuerySearchResult result, Runnable next) { } private void releaseBuffer() { - for (QuerySearchResult querySearchResult : buffer) { - querySearchResult.releaseAggs(); + var b = buffer; + if (b != null) { + this.buffer = null; + for (QuerySearchResult querySearchResult : b) { + querySearchResult.releaseAggs(); + } } - buffer.clear(); } private synchronized void onMergeFailure(Exception exc) { @@ -449,7 +453,7 @@ private void tryExecuteNext() { @Override protected void doRun() { MergeTask mergeTask = task; - QuerySearchResult[] toConsume = mergeTask.consumeBuffer(); + List toConsume = mergeTask.consumeBuffer(); while (mergeTask != null) { final MergeResult thisMergeResult = mergeResult; long estimatedTotalSize = (thisMergeResult != null ? thisMergeResult.estimatedSize : 0) + mergeTask.aggsBufferSize; @@ -512,15 +516,7 @@ public void onFailure(Exception exc) { }); } - private synchronized void releaseAggs() { - if (hasAggs) { - for (QuerySearchResult result : buffer) { - result.releaseAggs(); - } - } - } - - private static void releaseAggs(QuerySearchResult... toConsume) { + private static void releaseAggs(List toConsume) { for (QuerySearchResult result : toConsume) { result.releaseAggs(); } @@ -535,19 +531,19 @@ private record MergeResult( private static class MergeTask { private final List emptyResults; - private QuerySearchResult[] buffer; + private List buffer; private final long aggsBufferSize; private Runnable next; - private MergeTask(QuerySearchResult[] buffer, long aggsBufferSize, List emptyResults, Runnable next) { + private MergeTask(List buffer, long aggsBufferSize, List emptyResults, Runnable next) { this.buffer = buffer; this.aggsBufferSize = aggsBufferSize; this.emptyResults = emptyResults; this.next = next; } - public synchronized QuerySearchResult[] consumeBuffer() { - QuerySearchResult[] toRet = buffer; + public synchronized List consumeBuffer() { + List toRet = buffer; buffer = null; return toRet; } @@ -559,7 +555,7 @@ public synchronized Runnable consumeListener() { } public void cancel() { - QuerySearchResult[] buffer = consumeBuffer(); + List buffer = consumeBuffer(); if (buffer != null) { releaseAggs(buffer); } diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index 199228c9f992c..e9302883457e1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -37,6 +37,8 @@ */ public class RankFeaturePhase extends SearchPhase { + static final String NAME = "rank-feature"; + private static final Logger logger = LogManager.getLogger(RankFeaturePhase.class); private final AbstractSearchAsyncAction context; final SearchPhaseResults queryPhaseResults; @@ -51,7 +53,7 @@ public class RankFeaturePhase extends 
diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java
index 199228c9f992c..e9302883457e1 100644
--- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java
@@ -37,6 +37,8 @@
  */
 public class RankFeaturePhase extends SearchPhase {

+    static final String NAME = "rank-feature";
+
     private static final Logger logger = LogManager.getLogger(RankFeaturePhase.class);
     private final AbstractSearchAsyncAction<?> context;
     final SearchPhaseResults<SearchPhaseResult> queryPhaseResults;
@@ -51,7 +53,7 @@ public class RankFeaturePhase extends SearchPhase {
         AbstractSearchAsyncAction<?> context,
         RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext
     ) {
-        super("rank-feature");
+        super(NAME);
         assert rankFeaturePhaseRankCoordinatorContext != null;
         this.rankFeaturePhaseRankCoordinatorContext = rankFeaturePhaseRankCoordinatorContext;
         if (context.getNumShards() != queryPhaseResults.getNumShards()) {
@@ -71,7 +73,7 @@ public class RankFeaturePhase extends SearchPhase {
     }

     @Override
-    public void run() {
+    protected void run() {
         context.execute(new AbstractRunnable() {
             @Override
             protected void doRun() throws Exception {
@@ -84,7 +86,7 @@ protected void doRun() throws Exception {

             @Override
             public void onFailure(Exception e) {
-                context.onPhaseFailure(RankFeaturePhase.this, "", e);
+                context.onPhaseFailure(NAME, "", e);
             }
         });
     }
@@ -139,7 +141,7 @@ protected void innerOnResponse(RankFeatureResult response) {
                     progressListener.notifyRankFeatureResult(shardIndex);
                     rankRequestCounter.onResult(response);
                 } catch (Exception e) {
-                    context.onPhaseFailure(RankFeaturePhase.this, "", e);
+                    context.onPhaseFailure(NAME, "", e);
                 }
             }

@@ -194,7 +196,7 @@ public void onResponse(RankFeatureDoc[] docsWithUpdatedScores) {

                 @Override
                 public void onFailure(Exception e) {
-                    context.onPhaseFailure(RankFeaturePhase.this, "Computing updated ranks for results failed", e);
+                    context.onPhaseFailure(NAME, "Computing updated ranks for results failed", e);
                 }
             }
         );
@@ -239,6 +241,6 @@ private float maxScore(ScoreDoc[] scoreDocs) {
     }

     void moveToNextPhase(SearchPhaseResults<SearchPhaseResult> phaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) {
-        context.executeNextPhase(this, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase));
+        context.executeNextPhase(NAME, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase));
     }
 }
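The RankFeaturePhase hunks above replace every `RankFeaturePhase.this` argument with the static NAME constant, so failure reporting and phase transitions need only the phase's name, not the phase instance. A hedged sketch of the narrowed contract (PhaseCoordinator is a stand-in, not the real AbstractSearchAsyncAction API):

    // Stand-in coordinator: error reporting is keyed by phase name only.
    interface PhaseCoordinator {
        void onPhaseFailure(String phaseName, String message, Exception cause);
    }

    final class ExamplePhase {
        static final String NAME = "example-phase";

        void run(PhaseCoordinator coordinator) {
            try {
                // ... phase work ...
            } catch (Exception e) {
                coordinator.onPhaseFailure(NAME, "", e); // no reference to 'this' escapes
            }
        }
    }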
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
index 25d59a06664da..5c5c47b5fcc44 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
@@ -10,6 +10,13 @@
 package org.elasticsearch.action.search;

 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
@@ -17,12 +24,16 @@
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsKnnResults;
 import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.internal.AliasFilter;
 import org.elasticsearch.transport.Transport;

+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executor;
@@ -93,12 +104,11 @@ protected void executePhaseOnShard(
     @Override
     protected SearchPhase getNextPhase() {
         final List<DfsSearchResult> dfsSearchResults = results.getAtomicArray().asList();
-        final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults);
-        final List<DfsKnnResults> mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults);
+        final AggregatedDfs aggregatedDfs = aggregateDfs(dfsSearchResults);
         return new DfsQueryPhase(
             dfsSearchResults,
             aggregatedDfs,
-            mergedKnnResults,
+            mergeKnnResults(getRequest(), dfsSearchResults),
             queryPhaseResultConsumer,
             (queryResults) -> SearchQueryThenFetchAsyncAction.nextPhase(client, this, queryResults, aggregatedDfs),
             this
@@ -109,4 +119,95 @@ protected SearchPhase getNextPhase() {
     protected void onShardGroupFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {
         progressListener.notifyQueryFailure(shardIndex, shardTarget, exc);
     }
+
+    private static List<DfsKnnResults> mergeKnnResults(SearchRequest request, List<DfsSearchResult> dfsSearchResults) {
+        if (request.hasKnnSearch() == false) {
+            return null;
+        }
+        SearchSourceBuilder source = request.source();
+        List<List<TopDocs>> topDocsLists = new ArrayList<>(source.knnSearch().size());
+        List<SetOnce<String>> nestedPath = new ArrayList<>(source.knnSearch().size());
+        for (int i = 0; i < source.knnSearch().size(); i++) {
+            topDocsLists.add(new ArrayList<>());
+            nestedPath.add(new SetOnce<>());
+        }
+
+        for (DfsSearchResult dfsSearchResult : dfsSearchResults) {
+            if (dfsSearchResult.knnResults() != null) {
+                for (int i = 0; i < dfsSearchResult.knnResults().size(); i++) {
+                    DfsKnnResults knnResults = dfsSearchResult.knnResults().get(i);
+                    ScoreDoc[] scoreDocs = knnResults.scoreDocs();
+                    TotalHits totalHits = new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO);
+                    TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs);
+                    SearchPhaseController.setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex());
+                    topDocsLists.get(i).add(shardTopDocs);
+                    nestedPath.get(i).trySet(knnResults.getNestedPath());
+                }
+            }
+        }
+
+        List<DfsKnnResults> mergedResults = new ArrayList<>(source.knnSearch().size());
+        for (int i = 0; i < source.knnSearch().size(); i++) {
+            TopDocs mergedTopDocs = TopDocs.merge(source.knnSearch().get(i).k(), topDocsLists.get(i).toArray(new TopDocs[0]));
+            mergedResults.add(new DfsKnnResults(nestedPath.get(i).get(), mergedTopDocs.scoreDocs));
+        }
+        return mergedResults;
+    }
+
+    private static AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
+        Map<Term, TermStatistics> termStatistics = new HashMap<>();
+        Map<String, CollectionStatistics> fieldStatistics = new HashMap<>();
+        long aggMaxDoc = 0;
+        for (DfsSearchResult lEntry : results) {
+            final Term[] terms = lEntry.terms();
+            final TermStatistics[] stats = lEntry.termStatistics();
+            assert terms.length == stats.length;
+            for (int i = 0; i < terms.length; i++) {
+                assert terms[i] != null;
+                if (stats[i] == null) {
+                    continue;
+                }
+                TermStatistics existing = termStatistics.get(terms[i]);
+                if (existing != null) {
+                    assert terms[i].bytes().equals(existing.term());
+                    termStatistics.put(
+                        terms[i],
+                        new TermStatistics(
+                            existing.term(),
+                            existing.docFreq() + stats[i].docFreq(),
+                            existing.totalTermFreq() + stats[i].totalTermFreq()
+                        )
+                    );
+                } else {
+                    termStatistics.put(terms[i], stats[i]);
+                }
+
+            }
+
+            assert lEntry.fieldStatistics().containsKey(null) == false;
+            for (var entry : lEntry.fieldStatistics().entrySet()) {
+                String key = entry.getKey();
+                CollectionStatistics value = entry.getValue();
+                if (value == null) {
+                    continue;
+                }
+                assert key != null;
+                CollectionStatistics existing = fieldStatistics.get(key);
+                if (existing != null) {
+                    CollectionStatistics merged = new CollectionStatistics(
+                        key,
+                        existing.maxDoc() + value.maxDoc(),
+                        existing.docCount() + value.docCount(),
+                        existing.sumTotalTermFreq() + value.sumTotalTermFreq(),
+                        existing.sumDocFreq() + value.sumDocFreq()
+                    );
+                    fieldStatistics.put(key, merged);
+                } else {
+                    fieldStatistics.put(key, value);
+                }
+            }
+            aggMaxDoc += lEntry.maxDoc();
+        }
+        return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
+    }
 }
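The aggregateDfs() method moved above folds per-shard term statistics into a single map by summing docFreq and totalTermFreq per term. A self-contained sketch of the same fold with plain types (TermStat stands in for Lucene's TermStatistics):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Stand-in for Lucene's TermStatistics: two additive counters per term.
    record TermStat(long docFreq, long totalTermFreq) {}

    class DfsMergeSketch {
        // Fold per-shard statistics into one map by summing the counters,
        // the same shape as the aggregateDfs() loop above.
        static Map<String, TermStat> merge(List<Map<String, TermStat>> shards) {
            Map<String, TermStat> merged = new HashMap<>();
            for (Map<String, TermStat> shard : shards) {
                shard.forEach((term, stat) -> merged.merge(
                    term,
                    stat,
                    (a, b) -> new TermStat(a.docFreq() + b.docFreq(), a.totalTermFreq() + b.totalTermFreq())
                ));
            }
            return merged;
        }
    }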
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
index 7d849a72abf9d..702369dc38390 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
@@ -9,25 +9,25 @@
 package org.elasticsearch.action.search;

 import org.elasticsearch.cluster.routing.GroupShardsIterator;
-import org.elasticsearch.core.CheckedRunnable;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.transport.Transport;

-import java.io.IOException;
 import java.util.Objects;
 import java.util.function.Function;

 /**
  * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards.
  */
-abstract class SearchPhase implements CheckedRunnable<IOException> {
+abstract class SearchPhase {
     private final String name;

     protected SearchPhase(String name) {
         this.name = Objects.requireNonNull(name, "name must not be null");
     }

+    protected abstract void run();
+
     /**
      * Returns the phases name.
      */
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
index 69e7fba4dd0d5..f8736ab79690e 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
@@ -9,20 +9,16 @@
 package org.elasticsearch.action.search;

-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSortField;
 import org.apache.lucene.search.SortedSetSortField;
-import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.search.TotalHits.Relation;
-import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.io.stream.DelayableWriteable;
 import org.elasticsearch.common.lucene.Lucene;
@@ -42,9 +38,6 @@
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.search.dfs.AggregatedDfs;
-import org.elasticsearch.search.dfs.DfsKnnResults;
-import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult;
@@ -84,97 +77,6 @@ public SearchPhaseController(
         this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder;
     }

-    public static AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
-        Map<Term, TermStatistics> termStatistics = new HashMap<>();
-        Map<String, CollectionStatistics> fieldStatistics = new HashMap<>();
-        long aggMaxDoc = 0;
-        for (DfsSearchResult lEntry : results) {
-            final Term[] terms = lEntry.terms();
-            final TermStatistics[] stats = lEntry.termStatistics();
-            assert terms.length == stats.length;
-            for (int i = 0; i < terms.length; i++) {
-                assert terms[i] != null;
-                if (stats[i] == null) {
-                    continue;
-                }
-                TermStatistics existing = termStatistics.get(terms[i]);
-                if (existing != null) {
-                    assert terms[i].bytes().equals(existing.term());
-                    termStatistics.put(
-                        terms[i],
-                        new TermStatistics(
-                            existing.term(),
-                            existing.docFreq() + stats[i].docFreq(),
-                            existing.totalTermFreq() + stats[i].totalTermFreq()
-                        )
-                    );
-                } else {
-                    termStatistics.put(terms[i], stats[i]);
-                }
-
-            }
-
-            assert lEntry.fieldStatistics().containsKey(null) == false;
-            for (var entry : lEntry.fieldStatistics().entrySet()) {
-                String key = entry.getKey();
-                CollectionStatistics value = entry.getValue();
-                if (value == null) {
-                    continue;
-                }
-                assert key != null;
-                CollectionStatistics existing = fieldStatistics.get(key);
-                if (existing != null) {
-                    CollectionStatistics merged = new CollectionStatistics(
-                        key,
-                        existing.maxDoc() + value.maxDoc(),
-                        existing.docCount() + value.docCount(),
-                        existing.sumTotalTermFreq() + value.sumTotalTermFreq(),
-                        existing.sumDocFreq() + value.sumDocFreq()
-                    );
-                    fieldStatistics.put(key, merged);
-                } else {
-                    fieldStatistics.put(key, value);
-                }
-            }
-            aggMaxDoc += lEntry.maxDoc();
-        }
-        return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
-    }
-
-    public static List<DfsKnnResults> mergeKnnResults(SearchRequest request, List<DfsSearchResult> dfsSearchResults) {
-        if (request.hasKnnSearch() == false) {
-            return null;
-        }
-        SearchSourceBuilder source = request.source();
-        List<List<TopDocs>> topDocsLists = new ArrayList<>(source.knnSearch().size());
-        List<SetOnce<String>> nestedPath = new ArrayList<>(source.knnSearch().size());
-        for (int i = 0; i < source.knnSearch().size(); i++) {
-            topDocsLists.add(new ArrayList<>());
-            nestedPath.add(new SetOnce<>());
-        }
-
-        for (DfsSearchResult dfsSearchResult : dfsSearchResults) {
-            if (dfsSearchResult.knnResults() != null) {
-                for (int i = 0; i < dfsSearchResult.knnResults().size(); i++) {
-                    DfsKnnResults knnResults = dfsSearchResult.knnResults().get(i);
-                    ScoreDoc[] scoreDocs = knnResults.scoreDocs();
-                    TotalHits totalHits = new TotalHits(scoreDocs.length, Relation.EQUAL_TO);
-                    TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs);
-                    setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex());
-                    topDocsLists.get(i).add(shardTopDocs);
-                    nestedPath.get(i).trySet(knnResults.getNestedPath());
-                }
-            }
-        }
-
-        List<DfsKnnResults> mergedResults = new ArrayList<>(source.knnSearch().size());
-        for (int i = 0; i < source.knnSearch().size(); i++) {
-            TopDocs mergedTopDocs = TopDocs.merge(source.knnSearch().get(i).k(), topDocsLists.get(i).toArray(new TopDocs[0]));
-            mergedResults.add(new DfsKnnResults(nestedPath.get(i).get(), mergedTopDocs.scoreDocs));
-        }
-        return mergedResults;
-    }
-
     /**
      * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each
      * named completion suggestion across all shards. If more than one named completion suggestion is specified in the
@@ -496,38 +398,6 @@ private static SearchHits getHits(
         );
     }

-    /**
-     * Reduces the given query results and consumes all aggregations and profile results.
-     * @param queryResults a list of non-null query shard results
-     */
-    static ReducedQueryPhase reducedScrollQueryPhase(Collection<? extends SearchPhaseResult> queryResults) {
-        AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() {
-            @Override
-            public AggregationReduceContext forPartialReduction() {
-                throw new UnsupportedOperationException("Scroll requests don't have aggs");
-            }
-
-            @Override
-            public AggregationReduceContext forFinalReduction() {
-                throw new UnsupportedOperationException("Scroll requests don't have aggs");
-            }
-        };
-        final TopDocsStats topDocsStats = new TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE);
-        final List<TopDocs> topDocs = new ArrayList<>();
-        for (SearchPhaseResult sortedResult : queryResults) {
-            QuerySearchResult queryResult = sortedResult.queryResult();
-            final TopDocsAndMaxScore td = queryResult.consumeTopDocs();
-            assert td != null;
-            topDocsStats.add(td, queryResult.searchTimedOut(), queryResult.terminatedEarly());
-            // make sure we set the shard index before we add it - the consumer didn't do that yet
-            if (td.topDocs.scoreDocs.length > 0) {
-                setShardIndex(td.topDocs, queryResult.getShardIndex());
-                topDocs.add(td.topDocs);
-            }
-        }
-        return reducedQueryPhase(queryResults, null, topDocs, topDocsStats, 0, true, aggReduceContextBuilder, null, true);
-    }
-
     /**
      * Reduces the given query results and consumes all aggregations and profile results.
      * @param queryResults a list of non-null query shard results
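The SearchPhase change above drops the CheckedRunnable interface and makes run() an abstract protected method, so a phase can only be started by the search framework rather than by any holder of a Runnable reference. A sketch of what a minimal phase now looks like (PhaseSketch is illustrative, mirroring the anonymous-subclass call sites in this patch):

    // Sketch: phases are small named subclasses with a framework-only run().
    abstract class PhaseSketch {
        private final String name;

        protected PhaseSketch(String name) {
            this.name = name;
        }

        protected abstract void run();

        String getName() {
            return name;
        }
    }

    class PhaseFactorySketch {
        PhaseSketch responsePhase() {
            return new PhaseSketch("fetch") {
                @Override
                protected void run() {
                    // send the response here
                }
            };
        }
    }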
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
index 2231f791384fa..53da76d96e405 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
@@ -10,21 +10,27 @@
 package org.elasticsearch.action.search;

 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.search.TopDocs;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.AggregationReduceContext;
 import org.elasticsearch.search.internal.InternalScrollSearchRequest;
+import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.ShardSearchContextId;
+import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.transport.RemoteClusterService;
 import org.elasticsearch.transport.Transport;

 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -40,7 +46,7 @@
  * fan out to nodes and execute the query part of the scroll request. Subclasses can for instance
  * run separate fetch phases etc.
  */
-abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> implements Runnable {
+abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> {
     protected final Logger logger;
     protected final ActionListener<SearchResponse> listener;
     protected final ParsedScrollId scrollId;
@@ -229,7 +235,7 @@ protected SearchPhase sendResponsePhase(
     ) {
         return new SearchPhase("fetch") {
             @Override
-            public void run() {
+            protected void run() {
                 sendResponse(queryPhase, fetchResults);
             }
         };
@@ -301,4 +307,48 @@ protected void onShardFailure(
     protected Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) {
         return searchTransportService.getConnection(clusterAlias, node);
     }
+
+    /**
+     * Reduces the given query results and consumes all aggregations and profile results.
+     * @param queryResults a list of non-null query shard results
+     */
+    protected static SearchPhaseController.ReducedQueryPhase reducedScrollQueryPhase(Collection<? extends SearchPhaseResult> queryResults) {
+        AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() {
+            @Override
+            public AggregationReduceContext forPartialReduction() {
+                throw new UnsupportedOperationException("Scroll requests don't have aggs");
+            }
+
+            @Override
+            public AggregationReduceContext forFinalReduction() {
+                throw new UnsupportedOperationException("Scroll requests don't have aggs");
+            }
+        };
+        final SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats(
+            SearchContext.TRACK_TOTAL_HITS_ACCURATE
+        );
+        final List<TopDocs> topDocs = new ArrayList<>();
+        for (SearchPhaseResult sortedResult : queryResults) {
+            QuerySearchResult queryResult = sortedResult.queryResult();
+            final TopDocsAndMaxScore td = queryResult.consumeTopDocs();
+            assert td != null;
+            topDocsStats.add(td, queryResult.searchTimedOut(), queryResult.terminatedEarly());
+            // make sure we set the shard index before we add it - the consumer didn't do that yet
+            if (td.topDocs.scoreDocs.length > 0) {
+                SearchPhaseController.setShardIndex(td.topDocs, queryResult.getShardIndex());
+                topDocs.add(td.topDocs);
+            }
+        }
+        return SearchPhaseController.reducedQueryPhase(
+            queryResults,
+            null,
+            topDocs,
+            topDocsStats,
+            0,
+            true,
+            aggReduceContextBuilder,
+            null,
+            true
+        );
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
index 7b5ba21c80220..ba14b5bcd2cbe 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
@@ -51,7 +51,7 @@ protected void executeInitialPhase(

     @Override
     protected SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup) {
-        return sendResponsePhase(SearchPhaseController.reducedScrollQueryPhase(queryFetchResults.asList()), queryFetchResults);
+        return sendResponsePhase(reducedScrollQueryPhase(queryFetchResults.asList()), queryFetchResults);
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
index 8c33e3ca7da4b..29822e596356f 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
@@ -65,10 +65,8 @@ protected void executeInitialPhase(
     protected SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup) {
         return new SearchPhase("fetch") {
             @Override
-            public void run() {
-                final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedScrollQueryPhase(
-                    queryResults.asList()
-                );
+            protected void run() {
+                final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = reducedScrollQueryPhase(queryResults.asList());
                 ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs();
                 if (scoreDocs.length == 0) {
                     sendResponse(reducedQueryPhase, fetchResults);
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
index 36ca0fba94372..6c95a3c8fd436 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
@@ -270,7 +270,7 @@ protected void executePhaseOnShard(
     protected SearchPhase getNextPhase() {
         return new SearchPhase(getName()) {
             @Override
-            public void run() {
+            protected void run() {
                 sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray());
             }
         };
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
index cffba76988f7d..b232cd16ba65e 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
@@ -91,7 +91,7 @@ public void onFailure(Exception e) {
         };
         try {
             ParsedScrollId scrollId = parseScrollId(request.scrollId());
-            Runnable action = switch (scrollId.getType()) {
+            var action = switch (scrollId.getType()) {
                 case QUERY_THEN_FETCH_TYPE -> new SearchScrollQueryThenFetchAsyncAction(
                     logger,
                     clusterService,
diff --git a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java
index 481b59c72729c..4496dfb7310fe 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java
@@ -349,8 +349,8 @@ public void getIndex(GetIndexRequest request, ActionListener<GetIndexResponse> l
         execute(GetIndexAction.INSTANCE, request, listener);
     }

-    public GetIndexRequestBuilder prepareGetIndex() {
-        return new GetIndexRequestBuilder(this);
+    public GetIndexRequestBuilder prepareGetIndex(TimeValue masterTimeout) {
+        return new GetIndexRequestBuilder(this, masterTimeout);
     }

     public ActionFuture<BroadcastResponse> clearCache(final ClearIndicesCacheRequest request) {
diff --git a/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java b/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java
index 9e3497601fb57..f632ba33f7728 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java
@@ -47,7 +47,8 @@ void execute(
     /**
      * Obtain a connection to the remote cluster for use with the {@link #execute} override that allows to specify the connection. Useful
-     * for cases where you need to inspect {@link Transport.Connection#getVersion} before deciding the exact remote action to invoke.
+     * for cases where you need to inspect {@link Transport.Connection#getTransportVersion} before deciding the exact remote action to
+     * invoke.
      */
     void getConnection(@Nullable Request request, ActionListener<Transport.Connection> listener);
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
index 66fbe35fa52bf..230677a6c86c2 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;

@@ -101,9 +100,7 @@ public ClusterInfo(StreamInput in) throws IOException {
         this.dataPath = in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)
             ? in.readImmutableMap(NodeAndShard::new, StreamInput::readString)
             : in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString);
-        this.reservedSpace = in.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)
-            ? in.readImmutableMap(NodeAndPath::new, ReservedSpace::new)
-            : Map.of();
+        this.reservedSpace = in.readImmutableMap(NodeAndPath::new, ReservedSpace::new);
     }

     @Override
@@ -119,9 +116,7 @@ public void writeTo(StreamOutput out) throws IOException {
         } else {
             out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString);
         }
-        if (out.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) {
-            out.writeMap(this.reservedSpace);
-        }
+        out.writeMap(this.reservedSpace);
     }

     /**
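The ClusterInfo hunks above retire the RESERVED_BYTES_VERSION wire gate: once every supported peer version already has the field, both branches collapse into an unconditional read and write. A rough sketch of the idiom with a hypothetical version constant and field (java.io streams stand in for the transport stream types):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Sketch of retiring a wire-version gate (versions and field are made up).
    class GatedFieldSketch {
        static final int GATE_VERSION = 8_005_000;

        // Before: tolerate peers older than GATE_VERSION by defaulting the field.
        static long readOld(DataInputStream in, int peerVersion) throws IOException {
            return peerVersion >= GATE_VERSION ? in.readLong() : 0L;
        }

        // After: the oldest supported peer is >= GATE_VERSION, so the guard is
        // dead code and the field is read unconditionally, as in the hunk above.
        static long readNew(DataInputStream in) throws IOException {
            return in.readLong();
        }

        static void write(DataOutputStream out, long value) throws IOException {
            out.writeLong(value); // written unconditionally too
        }
    }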
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
index 673960c713391..17267525d4bdf 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.NodeFeature;

 import java.io.IOException;

@@ -23,7 +22,6 @@
  */
 public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable {

-    public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention", true);
     public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10);

     /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
index 353f17fe0e00c..8366083b1907e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
@@ -23,7 +23,6 @@
 import org.elasticsearch.common.unit.Processors;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.UpdateForV9;
-import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ParseField;
@@ -46,9 +45,6 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparable<DesiredNode> {

-    public static final NodeFeature RANGE_FLOAT_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.range_float_processors");
-    public static final NodeFeature DESIRED_NODE_VERSION_DEPRECATED = new NodeFeature("desired_node.version_deprecated", true);
-
     public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0;

     private static final ParseField SETTINGS_FIELD = new ParseField("settings");
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
index b2e0233463bf5..04f9448a936be 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
@@ -563,8 +563,6 @@ public Iterator<Setting<?>> settings() {

     public static final String INDEX_STATE_FILE_PREFIX = "state-";

-    static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersions.V_7_10_0;
-
     static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersions.V_8_6_0;

     private final int routingNumShards;
@@ -1644,11 +1642,7 @@ private static class IndexMetadataDiff implements Diff<IndexMetadata> {
             } else {
                 mappingsUpdatedVersion = IndexVersions.ZERO;
             }
-            if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) {
-                isSystem = in.readBoolean();
-            } else {
-                isSystem = false;
-            }
+            isSystem = in.readBoolean();
             timestampRange = IndexLongFieldRange.readFrom(in);
             if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) {
                 stats = in.readOptionalWriteable(IndexMetadataStats::new);
@@ -1694,9 +1688,7 @@ public void writeTo(StreamOutput out) throws IOException {
             if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
                 IndexVersion.writeVersion(mappingsUpdatedVersion, out);
             }
-            if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) {
-                out.writeBoolean(isSystem);
-            }
+            out.writeBoolean(isSystem);
             timestampRange.writeTo(out);
             if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) {
                 out.writeOptionalWriteable(stats);
@@ -1798,9 +1790,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function<String,
-        if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) {
-            builder.system(in.readBoolean());
-        }
+        builder.system(in.readBoolean());
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java
+    public static final Setting<Boolean> VERIFIED_READ_ONLY_SETTING = Setting.boolSetting(
+        "index.verified_read_only",
+        false,
+        Setting.Property.IndexScope,
+        Setting.Property.NotCopyableOnResize,
+        // Allow the setting to be updated in snapshot builds
+        Build.current().isSnapshot() ? Setting.Property.OperatorDynamic : Setting.Property.PrivateIndex
+    );
+
     private final ClusterService clusterService;
     private final AllocationService allocationService;
     private final IndexMetadataVerifier indexMetadataVerifier;
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
index e38cd677991f3..24b14a46c8782 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
@@ -54,8 +54,6 @@
  */
 public abstract class IndexRouting {

-    static final NodeFeature BOOLEAN_ROUTING_PATH = new NodeFeature("routing.boolean_routing_path", true);
-    static final NodeFeature MULTI_VALUE_ROUTING_PATH = new NodeFeature("routing.multi_value_routing_path", true);
     static final NodeFeature LOGSB_ROUTE_ON_SORT_FIELDS = new NodeFeature("routing.logsb_route_on_sort_fields");

     /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
index 1545fdf90d111..461ac50e1efc8 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
@@ -18,7 +18,7 @@ public class RoutingFeatures implements FeatureSpecification {

     @Override
     public Set<NodeFeature> getFeatures() {
-        return Set.of(IndexRouting.BOOLEAN_ROUTING_PATH, IndexRouting.MULTI_VALUE_ROUTING_PATH);
+        return Set.of();
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index 3c1f53ca4a2c9..b5a513777756f 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -174,6 +174,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         IndexSettings.DEFAULT_PIPELINE,
         IndexSettings.FINAL_PIPELINE,
         MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING,
+        MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING,
         ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING,
         DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS,
         ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP,
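The two hunks above belong together: MetadataIndexStateService defines the new index.verified_read_only setting, and IndexScopedSettings adds it to the built-in set, since an index-scoped setting is only accepted when it is registered there. A simplified sketch of the define-then-register shape (BoolSetting is a stand-in, not the real Setting class):

    import java.util.Map;
    import java.util.Set;

    // Stand-in for an index-scoped boolean setting with a default value.
    record BoolSetting(String key, boolean defaultValue) {
        boolean get(Map<String, String> indexSettings) {
            String raw = indexSettings.get(key);
            return raw == null ? defaultValue : Boolean.parseBoolean(raw);
        }
    }

    class SettingsRegistrySketch {
        static final BoolSetting VERIFIED_READ_ONLY = new BoolSetting("index.verified_read_only", false);

        // Mirrors the IndexScopedSettings hunk: a new setting must be added to
        // the registered set before any index may carry it.
        static final Set<BoolSetting> BUILT_IN_INDEX_SETTINGS = Set.of(VERIFIED_READ_ONLY);

        public static void main(String[] args) {
            Map<String, String> idx = Map.of("index.verified_read_only", "true");
            System.out.println(VERIFIED_READ_ONLY.get(idx)); // true
        }
    }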
diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
deleted file mode 100644
index 72fc955320b94..0000000000000
--- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.health;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class HealthFeatures implements FeatureSpecification {
-
-    public static final NodeFeature SUPPORTS_EXTENDED_REPOSITORY_INDICATOR = new NodeFeature("health.extended_repository_indicator", true);
-
-    @Override
-    public Set<NodeFeature> getFeatures() {
-        return Set.of(SUPPORTS_EXTENDED_REPOSITORY_INDICATOR);
-    }
-}
diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
index aab9e972cba73..113e789727f0a 100644
--- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
+++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.common.util.concurrent.RunOnce;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.health.metadata.HealthMetadata;
 import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException;
 import org.elasticsearch.health.node.selection.HealthNode;
@@ -62,7 +61,6 @@ public class LocalHealthMonitor implements ClusterStateListener {
     private final ClusterService clusterService;
     private final ThreadPool threadPool;
     private final Client client;
-    private final FeatureService featureService;

     private volatile TimeValue monitorInterval;
     private volatile boolean enabled;
@@ -88,7 +86,6 @@ private LocalHealthMonitor(
         ClusterService clusterService,
         ThreadPool threadPool,
         Client client,
-        FeatureService featureService,
         List<HealthTracker<?>> healthTrackers
     ) {
         this.threadPool = threadPool;
@@ -96,7 +93,6 @@ private LocalHealthMonitor(
         this.enabled = HealthNodeTaskExecutor.ENABLED_SETTING.get(settings);
         this.clusterService = clusterService;
         this.client = client;
-        this.featureService = featureService;
         this.healthTrackers = healthTrackers;
     }

@@ -105,17 +101,9 @@ public static LocalHealthMonitor create(
         ClusterService clusterService,
         ThreadPool threadPool,
         Client client,
-        FeatureService featureService,
         List<HealthTracker<?>> healthTrackers
     ) {
-        LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(
-            settings,
-            clusterService,
-            threadPool,
-            client,
-            featureService,
-            healthTrackers
-        );
+        LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(settings, clusterService, threadPool, client, healthTrackers);
         localHealthMonitor.registerListeners();
         return localHealthMonitor;
     }
diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
index 766b6ecbc7b9d..284140460a437 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -25,7 +25,6 @@
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -817,8 +816,6 @@ private static String getIgnoreAboveDefaultValue(final Settings settings) {
         }
     }

-    public static final NodeFeature IGNORE_ABOVE_INDEX_LEVEL_SETTING = new NodeFeature("mapper.ignore_above_index_level_setting", true);
-
     private final Index index;
     private final IndexVersion version;
     private final Logger logger;
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 8d3d1bde316ea..40839d8e18781 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -819,7 +819,7 @@ private GetResult getFromTranslog(
     ) throws IOException {
         assert get.isReadFromTranslog();
         translogGetCount.incrementAndGet();
-        final TranslogDirectoryReader inMemoryReader = new TranslogDirectoryReader(
+        final DirectoryReader inMemoryReader = TranslogDirectoryReader.create(
             shardId,
             index,
             mappingLookup,
@@ -3161,7 +3161,7 @@ public Translog.Snapshot newChangesSnapshot(
         final Translog.Snapshot snapshot;
         if (engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
             snapshot = new LuceneSyntheticSourceChangesSnapshot(
-                engineConfig.getMapperService().mappingLookup(),
+                engineConfig.getMapperService(),
                 searcher,
                 SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
                 maxChunkSize,
@@ -3173,6 +3173,7 @@ public Translog.Snapshot newChangesSnapshot(
             );
         } else {
             snapshot = new LuceneChangesSnapshot(
+                engineConfig.getMapperService(),
                 searcher,
                 SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
                 fromSeqNo,
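In the InternalEngine hunks above, newChangesSnapshot() now passes the whole MapperService to both snapshot implementations instead of only a MappingLookup, since the shared base class also needs the bit-set producer and the inference metadata mapper. A rough sketch of the dispatch with stand-in types:

    // Stand-ins for the two snapshot flavours; both now receive the full
    // mapper service rather than only the mapping lookup.
    interface MapperSvc {}
    interface Snapshot extends AutoCloseable {}

    class SnapshotDispatchSketch {
        record SyntheticSourceSnapshot(MapperSvc mappers) implements Snapshot {
            public void close() {}
        }

        record StoredSourceSnapshot(MapperSvc mappers) implements Snapshot {
            public void close() {}
        }

        // Mirrors newChangesSnapshot(): synthetic recovery source selects the
        // synthetic-source implementation, otherwise the stored-source one.
        Snapshot newChangesSnapshot(boolean syntheticRecoverySource, MapperSvc mappers) {
            return syntheticRecoverySource ? new SyntheticSourceSnapshot(mappers) : new StoredSourceSnapshot(mappers);
        }
    }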
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
index d4466cbc17c54..30c6c639b9cf7 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.core.Assertions;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
+import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.transport.Transports;
@@ -46,6 +47,7 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
     /**
      * Creates a new "translog" snapshot from Lucene for reading operations whose seq# in the specified range.
      *
+     * @param mapperService the mapper service for this index
      * @param engineSearcher the internal engine searcher which will be taken over if the snapshot is opened successfully
      * @param searchBatchSize the number of documents should be returned by each search
      * @param fromSeqNo the min requesting seq# - inclusive
@@ -56,6 +58,7 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
      * @param indexVersionCreated the version on which this index was created
      */
     public LuceneChangesSnapshot(
+        MapperService mapperService,
         Engine.Searcher engineSearcher,
         int searchBatchSize,
         long fromSeqNo,
@@ -65,7 +68,7 @@ public LuceneChangesSnapshot(
         boolean accessStats,
         IndexVersion indexVersionCreated
     ) throws IOException {
-        super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
+        super(mapperService, engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
         this.creationThread = Assertions.ENABLED ? Thread.currentThread() : null;
         this.singleConsumer = singleConsumer;
         this.parallelArray = new ParallelArray(this.searchBatchSize);
@@ -214,20 +217,24 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException {
                 if (leaf.reader() instanceof SequentialStoredFieldsLeafReader) {
                     storedFieldsReader = ((SequentialStoredFieldsLeafReader) leaf.reader()).getSequentialStoredFieldsReader();
                     storedFieldsReaderOrd = leaf.ord;
+                    setNextSourceMetadataReader(leaf);
                 } else {
                     storedFieldsReader = null;
                     storedFieldsReaderOrd = -1;
                 }
             }
         }
+
         if (storedFieldsReader != null) {
             assert singleConsumer : "Sequential access optimization must not be enabled for multiple consumers";
             assert parallelArray.useSequentialStoredFieldsReader;
             assert storedFieldsReaderOrd == leaf.ord : storedFieldsReaderOrd + " != " + leaf.ord;
             storedFieldsReader.document(segmentDocID, fields);
         } else {
+            setNextSourceMetadataReader(leaf);
             leaf.reader().storedFields().document(segmentDocID, fields);
         }
+
+        final BytesReference source = fields.source() != null ? addSourceMetadata(fields.source(), segmentDocID) : null;

         final Translog.Operation op;
         final boolean isTombstone = parallelArray.isTombStone[docIndex];
@@ -241,7 +248,6 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException {
             op = new Translog.Delete(id, seqNo, primaryTerm, version);
             assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + op + "]";
         } else {
-            final BytesReference source = fields.source();
             if (source == null) {
                 // TODO: Callers should ask for the range that source should be retained. Thus we should always
                 // check for the existence source once we make peer-recovery to send ops after the local checkpoint.
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java
index 08508103181ed..20154c20b3634 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java
@@ -13,12 +13,11 @@
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.util.ArrayUtil;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader;
 import org.elasticsearch.index.fieldvisitor.StoredFieldLoader;
-import org.elasticsearch.index.mapper.MappingLookup;
+import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.SourceFieldMetrics;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.index.translog.Translog;
@@ -66,7 +65,7 @@ boolean hasRecoverySourceSize() {
     private final Deque<SearchRecord> operationQueue = new LinkedList<>();

     public LuceneSyntheticSourceChangesSnapshot(
-        MappingLookup mappingLookup,
+        MapperService mapperService,
         Engine.Searcher engineSearcher,
         int searchBatchSize,
         long maxMemorySizeInBytes,
@@ -76,13 +75,13 @@ public LuceneSyntheticSourceChangesSnapshot(
         boolean accessStats,
         IndexVersion indexVersionCreated
     ) throws IOException {
-        super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
+        super(mapperService, engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
         // a MapperService#updateMapping(...) of empty index may not have been invoked and then mappingLookup is empty
-        assert engineSearcher.getDirectoryReader().maxDoc() == 0 || mappingLookup.isSourceSynthetic()
+        assert engineSearcher.getDirectoryReader().maxDoc() == 0 || mapperService.mappingLookup().isSourceSynthetic()
             : "either an empty index or synthetic source must be enabled for proper functionality.";
         // ensure we can buffer at least one document
         this.maxMemorySizeInBytes = maxMemorySizeInBytes > 0 ? maxMemorySizeInBytes : 1;
-        this.sourceLoader = mappingLookup.newSourceLoader(null, SourceFieldMetrics.NOOP);
+        this.sourceLoader = mapperService.mappingLookup().newSourceLoader(null, SourceFieldMetrics.NOOP);
         Set<String> storedFields = sourceLoader.requiredStoredFields();
         this.storedFieldLoader = StoredFieldLoader.create(false, storedFields);
         this.lastSeenSeqNo = fromSeqNo - 1;
@@ -194,6 +193,7 @@ private Translog.Operation[] loadDocuments(List<SearchRecord> documentRecords) throws IOException {
             leafFieldLoader = storedFieldLoader.getLoader(leafReaderContext, null);
             leafSourceLoader = sourceLoader.leaf(leafReaderContext.reader(), null);
+            setNextSourceMetadataReader(leafReaderContext);
         }
         int segmentDocID = docRecord.docID() - docBase;
         leafFieldLoader.advanceTo(segmentDocID);
@@ -229,17 +229,16 @@ private Translog.Operation createOperation(
                 return null;
             }
         }
-        BytesReference source = sourceLoader.source(fieldLoader, segmentDocID).internalSourceRef();
+        var sourceBytes = addSourceMetadata(sourceLoader.source(fieldLoader, segmentDocID).internalSourceRef(), segmentDocID);
         return new Translog.Index(
             fieldLoader.id(),
             docRecord.seqNo(),
             docRecord.primaryTerm(),
             docRecord.version(),
-            source,
+            sourceBytes,
             fieldLoader.routing(),
             -1 // autogenerated timestamp
         );
     }
 }
-
 }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index 010fc1bd9e411..63a4696ddb08e 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -97,7 +97,7 @@ public class ReadOnlyEngine extends Engine {
     @SuppressWarnings("this-escape")
     public ReadOnlyEngine(
         EngineConfig config,
-        SeqNoStats seqNoStats,
+        @Nullable SeqNoStats seqNoStats,
         @Nullable TranslogStats translogStats,
         boolean obtainLock,
         Function<DirectoryReader, DirectoryReader> readerWrapperFunction,
diff --git a/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java
index 191125c59705e..8a96d4a2a252c 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java
@@ -22,12 +22,17 @@
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopFieldCollectorManager;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper;
+import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+import org.elasticsearch.index.mapper.ValueFetcher;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.search.lookup.Source;

 import java.io.Closeable;
 import java.io.IOException;
@@ -44,6 +49,7 @@ public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, Closeable {
     private final IndexVersion indexVersionCreated;
     private final IndexSearcher indexSearcher;
+    private final ValueFetcher sourceMetadataFetcher;
     private final Closeable onClose;

     protected final long fromSeqNo, toSeqNo;
@@ -67,6 +73,7 @@ public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, Closeable {
      * @param indexVersionCreated Version of the index when it was created.
      */
    protected SearchBasedChangesSnapshot(
+        MapperService mapperService,
         Engine.Searcher engineSearcher,
         int searchBatchSize,
         long fromSeqNo,
@@ -103,6 +110,19 @@ protected SearchBasedChangesSnapshot(

         this.accessStats = accessStats;
         this.totalHits = accessStats ? indexSearcher.count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated)) : -1;
+        this.sourceMetadataFetcher = createSourceMetadataValueFetcher(mapperService, indexSearcher);
+    }
+
+    private ValueFetcher createSourceMetadataValueFetcher(MapperService mapperService, IndexSearcher searcher) {
+        if (mapperService.mappingLookup().inferenceFields().isEmpty()) {
+            return null;
+        }
+        var mapper = (InferenceMetadataFieldsMapper) mapperService.mappingLookup()
+            .getMapping()
+            .getMetadataMapperByName(InferenceMetadataFieldsMapper.NAME);
+        return mapper != null
+            ? mapper.fieldType().valueFetcher(mapperService.mappingLookup(), mapperService.getBitSetProducer(), searcher)
+            : null;
     }

     /**
@@ -184,6 +204,45 @@ protected TopDocs nextTopDocs() throws IOException {
         return results;
     }

+    /**
+     * Sets the reader context to enable reading metadata that was removed from the {@code _source}.
+     * This method sets up the {@code sourceMetadataFetcher} with the provided {@link LeafReaderContext},
+     * ensuring it is ready to fetch metadata for subsequent operations.
+     *
+     * <p>Note: This method should be called before {@link #addSourceMetadata(BytesReference, int)} at the start of every leaf
+     * to ensure the metadata fetcher is properly initialized.</p>
+     */
+    protected void setNextSourceMetadataReader(LeafReaderContext context) {
+        if (sourceMetadataFetcher != null) {
+            sourceMetadataFetcher.setNextReader(context);
+        }
+    }
+
+    /**
+     * Creates a new {@link Source} object by combining the provided {@code originalSource}
+     * with additional metadata fields. If the {@code sourceMetadataFetcher} is null or no metadata
+     * fields are fetched, the original source is returned unchanged.
+     *
+     * @param originalSourceBytes the original source bytes
+     * @param segmentDocID the document ID used to fetch metadata fields
+     * @return a new {@link Source} instance containing the original data and additional metadata,
+     *         or the original source if no metadata is added
+     * @throws IOException if an error occurs while fetching metadata values
+     */
+    protected BytesReference addSourceMetadata(BytesReference originalSourceBytes, int segmentDocID) throws IOException {
+        if (sourceMetadataFetcher == null) {
+            return originalSourceBytes;
+        }
+        var originalSource = Source.fromBytes(originalSourceBytes);
+        List<Object> values = sourceMetadataFetcher.fetchValues(originalSource, segmentDocID, List.of());
+        if (values.isEmpty()) {
+            return originalSourceBytes;
+        }
+        var map = originalSource.source();
+        map.put(InferenceMetadataFieldsMapper.NAME, values.get(0));
+        return Source.fromMap(map, originalSource.sourceContentType()).internalSourceRef();
+    }
+
     static IndexSearcher newIndexSearcher(Engine.Searcher engineSearcher) throws IOException {
         return new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()));
     }
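The two methods just added to SearchBasedChangesSnapshot form a small per-leaf protocol: point the fetcher at a leaf once, then augment each document's source, in that order, as the javadoc above requires. A sketch of the calling pattern a subclass follows (Fetcher is a stand-in for ValueFetcher):

    import java.util.List;

    class LeafProtocolSketch {
        interface Fetcher {
            void setNextReader(Object leafContext);      // once per leaf
            List<Object> fetchValues(int segmentDocId);  // once per document
        }

        // For every leaf: initialize the fetcher first, then fetch per doc,
        // matching the ordering the javadoc above prescribes.
        static void consume(List<Object> leaves, int docsPerLeaf, Fetcher fetcher) {
            for (Object leaf : leaves) {
                fetcher.setNextReader(leaf);
                for (int doc = 0; doc < docsPerLeaf; doc++) {
                    List<Object> values = fetcher.fetchValues(doc);
                    // merge values back into the _source map here
                }
            }
        }
    }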
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
index 0928b4500e6da..ac5bf31c2b730 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.index.engine;

+import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.index.BaseTermsEnum;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.ByteVectorValues;
@@ -46,6 +47,9 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.index.fieldvisitor.FieldNamesProvidingStoredFieldsVisitor;
@@ -76,9 +80,9 @@
  * into an in-memory Lucene segment that is created on-demand.
  */
 final class TranslogDirectoryReader extends DirectoryReader {
-    private final TranslogLeafReader leafReader;
+    private final LeafReader leafReader;

-    TranslogDirectoryReader(
+    static DirectoryReader create(
         ShardId shardId,
         Translog.Index operation,
         MappingLookup mappingLookup,
@@ -86,11 +90,39 @@ final class TranslogDirectoryReader extends DirectoryReader {
         EngineConfig engineConfig,
         Runnable onSegmentCreated
     ) throws IOException {
-        this(new TranslogLeafReader(shardId, operation, mappingLookup, documentParser, engineConfig, onSegmentCreated));
+        final Directory directory = new ByteBuffersDirectory();
+        boolean success = false;
+        try {
+            final LeafReader leafReader;
+            // When using synthetic source, the translog operation must always be reindexed into an in-memory Lucene to ensure consistent
+            // output for realtime-get operations. However, this can degrade the performance of realtime-get and update operations.
+            // If slight inconsistencies in realtime-get operations are acceptable, the translog operation can be reindexed lazily.
+            if (mappingLookup.isSourceSynthetic()) {
+                onSegmentCreated.run();
+                leafReader = createInMemoryReader(shardId, engineConfig, directory, documentParser, mappingLookup, false, operation);
+            } else {
+                leafReader = new TranslogLeafReader(
+                    shardId,
+                    operation,
+                    mappingLookup,
+                    documentParser,
+                    engineConfig,
+                    directory,
+                    onSegmentCreated
+                );
+            }
+            var directoryReader = ElasticsearchDirectoryReader.wrap(new TranslogDirectoryReader(directory, leafReader), shardId);
+            success = true;
+            return directoryReader;
+        } finally {
+            if (success == false) {
+                IOUtils.closeWhileHandlingException(directory);
+            }
+        }
     }

-    private TranslogDirectoryReader(TranslogLeafReader leafReader) throws IOException {
-        super(leafReader.directory, new LeafReader[] { leafReader }, null);
+    private TranslogDirectoryReader(Directory directory, LeafReader leafReader) throws IOException {
+        super(directory, new LeafReader[] { leafReader }, null);
         this.leafReader = leafReader;
     }

@@ -139,12 +171,13 @@ public CacheHelper getReaderCacheHelper() {
         return leafReader.getReaderCacheHelper();
     }

-    static DirectoryReader createInMemoryReader(
+    private static LeafReader createInMemoryReader(
         ShardId shardId,
         EngineConfig engineConfig,
         Directory directory,
         DocumentParser documentParser,
         MappingLookup mappingLookup,
+        boolean rootDocOnly,
         Translog.Index operation
     ) {
         final ParsedDocument parsedDocs = documentParser.parseDocument(
@@ -159,12 +192,21 @@ static DirectoryReader createInMemoryReader(
             IndexWriterConfig.OpenMode.CREATE
         ).setCodec(engineConfig.getCodec());

         try (IndexWriter writer = new IndexWriter(directory, writeConfig)) {
-            writer.addDocument(parsedDocs.rootDoc());
+            final int numDocs;
+            if (rootDocOnly) {
+                numDocs = 1;
+                writer.addDocument(parsedDocs.rootDoc());
+            } else {
+                numDocs = parsedDocs.docs().size();
+                writer.addDocuments(parsedDocs.docs());
+            }
             final DirectoryReader reader = open(writer);
-            if (reader.leaves().size() != 1 || reader.leaves().get(0).reader().numDocs() != 1) {
+            if (reader.leaves().size() != 1 || reader.leaves().get(0).reader().numDocs() != numDocs) {
                 reader.close();
                 throw new IllegalStateException(
-                    "Expected a single document segment; "
+                    "Expected a single segment with "
+                        + numDocs
+                        + " documents, "
                         + "but ["
                         + reader.leaves().size()
                         + " segments with "
@@ -172,7 +214,33 @@
                         + " documents"
                 );
             }
-            return reader;
+            LeafReader leafReader = reader.leaves().get(0).reader();
+            return new SequentialStoredFieldsLeafReader(leafReader) {
+                @Override
+                protected void doClose() throws IOException {
+                    IOUtils.close(super::doClose, directory);
+                }
+
+                @Override
+                public CacheHelper getCoreCacheHelper() {
+                    return leafReader.getCoreCacheHelper();
+                }
+
+                @Override
+                public CacheHelper getReaderCacheHelper() {
+                    return leafReader.getReaderCacheHelper();
+                }
+
+                @Override
+                public StoredFieldsReader getSequentialStoredFieldsReader() {
+                    return Lucene.segmentReader(leafReader).getFieldsReader().getMergeInstance();
+                }
+
+                @Override
+                protected StoredFieldsReader doGetSequentialStoredFieldsReader(StoredFieldsReader reader) {
+                    return reader;
+                }
+            };
         } catch (IOException e) {
             throw new EngineException(shardId, "failed to create an in-memory segment for get [" + operation.id() + "]", e);
         }
@@ -259,6 +327,7 @@ private static class TranslogLeafReader extends LeafReader {
             MappingLookup mappingLookup,
             DocumentParser documentParser,
             EngineConfig engineConfig,
+            Directory directory,
             Runnable onSegmentCreated
         ) {
             this.shardId = shardId;
@@ -267,7 +336,7 @@ private static class TranslogLeafReader extends LeafReader {
             this.documentParser = documentParser;
             this.engineConfig = engineConfig;
             this.onSegmentCreated = onSegmentCreated;
-            this.directory = new ByteBuffersDirectory();
+            this.directory = directory;
             this.uid = Uid.encodeId(operation.id());
         }

@@ -279,7 +348,15 @@ private LeafReader getDelegate() {
                 ensureOpen();
                 reader = delegate.get();
                 if (reader == null) {
-                    var indexReader = createInMemoryReader(shardId, engineConfig, directory, documentParser, mappingLookup, operation);
+                    var indexReader = createInMemoryReader(
+                        shardId,
+                        engineConfig,
+                        directory,
+                        documentParser,
+                        mappingLookup,
+                        true,
+                        operation
+                    );
                     reader = indexReader.leaves().get(0).reader();
                     final LeafReader existing = delegate.getAndSet(reader);
                     assert existing == null;
@@ -443,7 +520,7 @@ private void readStoredFieldsDirectly(StoredFieldVisitor visitor) throws IOExcep
             SourceFieldMapper mapper = mappingLookup.getMapping().getMetadataMapperByClass(SourceFieldMapper.class);
             if (mapper != null) {
                 try {
-                    sourceBytes = mapper.applyFilters(mappingLookup, sourceBytes, null);
+                    sourceBytes = mapper.applyFilters(mappingLookup, sourceBytes, null, true);
                 } catch (IOException e) {
                     throw new IOException("Failed to reapply filters after reading from translog", e);
                 }
@@ -464,7 +541,12 @@ private void readStoredFieldsDirectly(StoredFieldVisitor visitor) throws IOExcep

         @Override
         protected synchronized void doClose() throws IOException {
-            IOUtils.close(delegate.get(), directory);
+            final LeafReader leaf = delegate.get();
+            if (leaf != null) {
+                leaf.close();
+            } else {
+                directory.close();
+            }
         }
     }
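The create() factory above allocates a Directory first, then either hands it to the wrapping reader or closes it if any later step throws, using a success flag. A self-contained sketch of that cleanup idiom:

    import java.io.Closeable;
    import java.io.IOException;

    // Sketch of the success-flag cleanup idiom used by create() above.
    class FactorySketch {
        static Closeable create() throws IOException {
            Closeable resource = acquire();
            boolean success = false;
            try {
                Closeable wrapper = wrap(resource); // may throw
                success = true;
                return wrapper;                     // the wrapper now owns the resource
            } finally {
                if (success == false) {
                    closeQuietly(resource);         // never leak on the failure path
                }
            }
        }

        static Closeable acquire() { return () -> {}; }
        static Closeable wrap(Closeable inner) { return inner; }
        static void closeQuietly(Closeable c) {
            try { c.close(); } catch (IOException e) { /* suppressed */ }
        }
    }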
ShardId shardId = engineConfig.getShardId(); final MappingLookup mappingLookup = engineConfig.getMapperService().mappingLookup(); final DocumentParser documentParser = engineConfig.getMapperService().documentParser(); - try ( - var directory = new ByteBuffersDirectory(); - var reader = TranslogDirectoryReader.createInMemoryReader(shardId, engineConfig, directory, documentParser, mappingLookup, op) - ) { + try (var reader = TranslogDirectoryReader.create(shardId, op, mappingLookup, documentParser, engineConfig, () -> {})) { final Engine.Searcher searcher = new Engine.Searcher( "assert_translog", reader, @@ -66,7 +62,7 @@ static Translog.Index synthesizeSource(EngineConfig engineConfig, Translog.Index ); try ( LuceneSyntheticSourceChangesSnapshot snapshot = new LuceneSyntheticSourceChangesSnapshot( - mappingLookup, + engineConfig.getMapperService(), searcher, LuceneSyntheticSourceChangesSnapshot.DEFAULT_BATCH_SIZE, Integer.MAX_VALUE, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 9de463ec5f6f6..77de1654cf4ba 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -62,8 +61,6 @@ public class BooleanFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "boolean"; - static final NodeFeature BOOLEAN_DIMENSION = new NodeFeature("mapper.boolean_dimension", true); - public static class Values { public static final BytesRef TRUE = new BytesRef("T"); public static final BytesRef FALSE = new BytesRef("F"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 068a9828809dd..cf0c355a22e65 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSortConfig; @@ -22,8 +21,6 @@ import java.util.List; public class DocumentMapper { - static final NodeFeature INDEX_SORTING_ON_NESTED = new NodeFeature("mapper.index_sorting_on_nested", true); - private final String type; private final CompressedXContent mappingSource; private final MappingLookup mappingLookup; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index e03494dcb5926..bdb3d97d4c187 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -56,7 +56,6 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { 
public static final TypeParser PARSER = new FixedTypeParser(context -> new IgnoredSourceFieldMapper(context.getIndexSettings())); - static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source", true); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); static final NodeFeature IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD = new NodeFeature( "mapper.ignored_source_as_top_level_metadata_array_field" diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java index e539c07caef61..9708753926e1d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -31,8 +30,6 @@ public class IndexModeFieldMapper extends MetadataFieldMapper { - static final NodeFeature QUERYING_INDEX_MODE = new NodeFeature("mapper.query_index_mode", true); - public static final String NAME = "_index_mode"; public static final String CONTENT_TYPE = "_index_mode"; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index a26a4bb80d50e..bdcf9bf98279f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.lucene.search.AutomatonQueries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -89,9 +88,6 @@ public final class KeywordFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "keyword"; - static final NodeFeature KEYWORD_DIMENSION_IGNORE_ABOVE = new NodeFeature("mapper.keyword_dimension_ignore_above", true); - static final NodeFeature KEYWORD_NORMALIZER_SYNTHETIC_SOURCE = new NodeFeature("mapper.keyword_normalizer_synthetic_source", true); - public static class Defaults { public static final FieldType FIELD_TYPE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index f293ced122d23..bafa74b662f00 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.StringLiteralDeduplicator; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -30,8 +29,6 @@ public abstract class Mapper implements ToXContentFragment, Iterable { - public static final NodeFeature 
SYNTHETIC_SOURCE_KEEP_FEATURE = new NodeFeature("mapper.synthetic_source_keep", true); - public static final String SYNTHETIC_SOURCE_KEEP_PARAM = "synthetic_source_keep"; // Only relevant for synthetic source mode. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 5dbaf0e0f40ad..8e669a91fd9ea 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -11,9 +11,6 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; -import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import java.util.Set; @@ -28,33 +25,7 @@ public class MapperFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of( - BWC_WORKAROUND_9_0, - IgnoredSourceFieldMapper.TRACK_IGNORED_SOURCE, - PassThroughObjectMapper.PASS_THROUGH_PRIORITY, - RangeFieldMapper.NULL_VALUES_OFF_BY_ONE_FIX, - SourceFieldMapper.SYNTHETIC_SOURCE_FALLBACK, - DenseVectorFieldMapper.INT4_QUANTIZATION, - DenseVectorFieldMapper.BIT_VECTORS, - DocumentMapper.INDEX_SORTING_ON_NESTED, - KeywordFieldMapper.KEYWORD_DIMENSION_IGNORE_ABOVE, - IndexModeFieldMapper.QUERYING_INDEX_MODE, - NodeMappingStats.SEGMENT_LEVEL_FIELDS_STATS, - BooleanFieldMapper.BOOLEAN_DIMENSION, - ObjectMapper.SUBOBJECTS_AUTO, - ObjectMapper.SUBOBJECTS_AUTO_FIXES, - KeywordFieldMapper.KEYWORD_NORMALIZER_SYNTHETIC_SOURCE, - SourceFieldMapper.SYNTHETIC_SOURCE_STORED_FIELDS_ADVANCE_FIX, - Mapper.SYNTHETIC_SOURCE_KEEP_FEATURE, - SourceFieldMapper.SYNTHETIC_SOURCE_WITH_COPY_TO_AND_DOC_VALUES_FALSE_SUPPORT, - SourceFieldMapper.SYNTHETIC_SOURCE_COPY_TO_FIX, - FlattenedFieldMapper.IGNORE_ABOVE_SUPPORT, - IndexSettings.IGNORE_ABOVE_INDEX_LEVEL_SETTING, - SourceFieldMapper.SYNTHETIC_SOURCE_COPY_TO_INSIDE_OBJECTS_FIX, - TimeSeriesRoutingHashFieldMapper.TS_ROUTING_HASH_FIELD_PARSES_BYTES_REF, - FlattenedFieldMapper.IGNORE_ABOVE_WITH_ARRAYS_SUPPORT, - DenseVectorFieldMapper.BBQ_FORMAT - ); + return Set.of(BWC_WORKAROUND_9_0); } public static final NodeFeature CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX = new NodeFeature( diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java index e7ca7367832b6..0987c6dfb8c8b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -28,8 +27,6 @@ */ public class NodeMappingStats implements Writeable, ToXContentFragment { - public static final NodeFeature SEGMENT_LEVEL_FIELDS_STATS = new NodeFeature("mapper.segment_level_fields_stats", true); - private static final class Fields { static final String MAPPINGS = "mappings"; static final String TOTAL_COUNT = "total_count"; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index e734a8e5b4377..86ce4fbb74837 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -52,9 +51,6 @@ public class ObjectMapper extends Mapper { public static final String CONTENT_TYPE = "object"; static final String STORE_ARRAY_SOURCE_PARAM = "store_array_source"; - static final NodeFeature SUBOBJECTS_AUTO = new NodeFeature("mapper.subobjects_auto", true); - // No-op. All uses of this feature were reverted but node features can't be removed. - static final NodeFeature SUBOBJECTS_AUTO_FIXES = new NodeFeature("mapper.subobjects_auto_fixes", true); /** * Enhances the previously boolean option for subobjects support with an intermediate mode `auto` that uses diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java index d16acab11a508..fbf8dd4538037 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java @@ -10,7 +10,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.Explicit; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.XContentBuilder; @@ -39,8 +38,6 @@ public class PassThroughObjectMapper extends ObjectMapper { public static final String CONTENT_TYPE = "passthrough"; public static final String PRIORITY_PARAM_NAME = "priority"; - static final NodeFeature PASS_THROUGH_PRIORITY = new NodeFeature("mapper.pass_through_priority", true); - public static class Builder extends ObjectMapper.Builder { // Controls whether subfields are configured as time-series dimensions. 
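A note on the NodeFeature deletions running through these mapper files: every removed constant was declared with the two-argument NodeFeature constructor, whose boolean flag marks the feature as assumed, meaning any node that can join a cluster at this version already advertises it. Once a feature is assumed, registering it in a FeatureSpecification and gating behavior on it are both dead code, which is why MapperFeatures.getFeatures() collapses to Set.of(BWC_WORKAROUND_9_0) below. A minimal sketch of the retired pattern, using a hypothetical feature id rather than anything from this change:

import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.Set;

public class ExampleFeatures implements FeatureSpecification {
    // Hypothetical feature id. The `true` flag marks the feature as assumed:
    // every node that can still join the cluster already supports it.
    static final NodeFeature EXAMPLE_FEATURE = new NodeFeature("mapper.example_feature", true);

    @Override
    public Set<NodeFeature> getFeatures() {
        // Before this cleanup the assumed feature still had to be registered here;
        // afterwards both the constant and this registration can simply be deleted.
        return Set.of(EXAMPLE_FEATURE);
    }
}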
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 36f61311ddfc7..461ad74a9434d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -51,7 +51,6 @@ /** A {@link FieldMapper} for indexing numeric and date ranges, and creating queries */ public class RangeFieldMapper extends FieldMapper { - public static final NodeFeature NULL_VALUES_OFF_BY_ONE_FIX = new NodeFeature("mapper.range.null_values_off_by_one_fix", true); public static final NodeFeature DATE_RANGE_INDEXING_FIX = new NodeFeature("mapper.range.date_range_indexing_fix"); public static final boolean DEFAULT_INCLUDE_UPPER = true; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 36335be58ce94..6a06d8ba4df28 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -26,8 +26,11 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.engine.SearchBasedChangesSnapshot; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.fetch.FetchContext; +import org.elasticsearch.search.fetch.subphase.FetchSourcePhase; import org.elasticsearch.search.lookup.Source; import org.elasticsearch.search.lookup.SourceFilter; import org.elasticsearch.xcontent.XContentType; @@ -40,20 +43,6 @@ import java.util.Locale; public class SourceFieldMapper extends MetadataFieldMapper { - public static final NodeFeature SYNTHETIC_SOURCE_FALLBACK = new NodeFeature("mapper.source.synthetic_source_fallback", true); - public static final NodeFeature SYNTHETIC_SOURCE_STORED_FIELDS_ADVANCE_FIX = new NodeFeature( - "mapper.source.synthetic_source_stored_fields_advance_fix", - true - ); - public static final NodeFeature SYNTHETIC_SOURCE_WITH_COPY_TO_AND_DOC_VALUES_FALSE_SUPPORT = new NodeFeature( - "mapper.source.synthetic_source_with_copy_to_and_doc_values_false", - true - ); - public static final NodeFeature SYNTHETIC_SOURCE_COPY_TO_FIX = new NodeFeature("mapper.source.synthetic_source_copy_to_fix", true); - public static final NodeFeature SYNTHETIC_SOURCE_COPY_TO_INSIDE_OBJECTS_FIX = new NodeFeature( - "mapper.source.synthetic_source_copy_to_inside_objects_fix", - true - ); public static final NodeFeature REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION = new NodeFeature( "mapper.source.remove_synthetic_source_only_validation" ); @@ -400,9 +389,14 @@ public boolean isComplete() { @Override public void preParse(DocumentParserContext context) throws IOException { - BytesReference originalSource = context.sourceToParse().source(); + int originalSourceLength = context.sourceToParse().source().length(); XContentType contentType = context.sourceToParse().getXContentType(); - final BytesReference adaptedSource = applyFilters(context.mappingLookup(), originalSource, contentType); + BytesReference originalSource = removeInferenceMetadataFields( + context.mappingLookup(), + context.sourceToParse().source(), + contentType + ); + final BytesReference adaptedSource = applyFilters(context.mappingLookup(), originalSource, 
contentType, false); if (adaptedSource != null) { final BytesRef ref = adaptedSource.toBytesRef(); @@ -421,7 +415,7 @@ public void preParse(DocumentParserContext context) throws IOException { * This size is used in {@link LuceneSyntheticSourceChangesSnapshot} to control memory * usage during the recovery process when loading a batch of synthetic sources. */ - context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_SIZE_NAME, ref.length)); + context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_SIZE_NAME, originalSourceLength)); } else { context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); @@ -429,22 +423,48 @@ public void preParse(DocumentParserContext context) throws IOException { } } + /** + * Removes the {@link InferenceMetadataFieldsMapper} content from the {@code _source} if it is present. + * This metadata is regenerated at query or snapshot recovery time using stored fields and doc values. + * + *
<p>For details on how the metadata is re-added, see: + * <ul> + * <li>{@link SearchBasedChangesSnapshot#addSourceMetadata(BytesReference, int)}</li> + * <li>{@link FetchSourcePhase#getProcessor(FetchContext)}</li> + * </ul>
+ */ + private BytesReference removeInferenceMetadataFields( + MappingLookup mappingLookup, + @Nullable BytesReference originalSource, + @Nullable XContentType contentType + ) { + if (originalSource != null + && InferenceMetadataFieldsMapper.isEnabled(mappingLookup) + && mappingLookup.inferenceFields().isEmpty() == false) { + return Source.fromBytes(originalSource, contentType) + .filter(new SourceFilter(new String[] {}, new String[] { InferenceMetadataFieldsMapper.NAME })) + .internalSourceRef(); + } else { + return originalSource; + } + } + @Nullable public BytesReference applyFilters( - @Nullable MappingLookup mappingLookup, + MappingLookup mappingLookup, @Nullable BytesReference originalSource, - @Nullable XContentType contentType + @Nullable XContentType contentType, + boolean removeMetadataFields ) throws IOException { if (stored() == false || originalSource == null) { return null; } var modSourceFilter = sourceFilter; - if (mappingLookup != null + if (removeMetadataFields && InferenceMetadataFieldsMapper.isEnabled(mappingLookup) && mappingLookup.inferenceFields().isEmpty() == false) { - /** - * Removes {@link InferenceMetadataFieldsMapper} content from _source. - * This content is re-generated at query time (if requested) using stored fields and doc values. + /* + * Removes the {@link InferenceMetadataFieldsMapper} content from the {@code _source}. */ String[] modExcludes = new String[excludes != null ? excludes.length + 1 : 1]; if (excludes != null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java index 2a7069c5a52e3..4a6ba5d1fa800 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ByteUtils; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.fielddata.FieldData; @@ -47,10 +46,6 @@ public class TimeSeriesRoutingHashFieldMapper extends MetadataFieldMapper { public static final TimeSeriesRoutingHashFieldMapper INSTANCE = new TimeSeriesRoutingHashFieldMapper(); public static final TypeParser PARSER = new FixedTypeParser(c -> c.getIndexSettings().getMode().timeSeriesRoutingHashFieldMapper()); - static final NodeFeature TS_ROUTING_HASH_FIELD_PARSES_BYTES_REF = new NodeFeature( - "tsdb.ts_routing_hash_doc_value_parse_byte_ref", - true - ); public static final DocValueFormat TS_ROUTING_HASH_DOC_VALUE_FORMAT = TimeSeriesRoutingHashFieldType.DOC_VALUE_FORMAT; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java index fc3f297f97252..7ef12f6dd30d2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.analysis.NamedAnalyzer; import 
org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -112,12 +111,6 @@ */ public final class FlattenedFieldMapper extends FieldMapper { - public static final NodeFeature IGNORE_ABOVE_SUPPORT = new NodeFeature("flattened.ignore_above_support", true); - public static final NodeFeature IGNORE_ABOVE_WITH_ARRAYS_SUPPORT = new NodeFeature( - "mapper.flattened.ignore_above_with_arrays_support", - true - ); - public static final String CONTENT_TYPE = "flattened"; public static final String KEYED_FIELD_SUFFIX = "._keyed"; public static final String KEYED_IGNORED_VALUES_FIELD_SUFFIX = "._keyed._ignored"; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 3e0656205b976..5edff48577efc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -38,7 +38,6 @@ import org.apache.lucene.util.VectorUtil; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat; @@ -108,10 +107,6 @@ public static boolean isNotUnitVector(float magnitude) { return Math.abs(magnitude - 1.0f) > EPS; } - public static final NodeFeature INT4_QUANTIZATION = new NodeFeature("mapper.vectors.int4_quantization", true); - public static final NodeFeature BIT_VECTORS = new NodeFeature("mapper.vectors.bit_vectors", true); - public static final NodeFeature BBQ_FORMAT = new NodeFeature("mapper.vectors.bbq", true); - public static final IndexVersion MAGNITUDE_STORED_INDEX_VERSION = IndexVersions.V_7_5_0; public static final IndexVersion INDEXED_BY_DEFAULT_INDEX_VERSION = IndexVersions.FIRST_DETACHED_INDEX_VERSION; public static final IndexVersion NORMALIZE_COSINE = IndexVersions.NORMALIZED_VECTOR_COSINE; diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java index 575c15d5c063e..4cddf8f91ab3f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java @@ -36,6 +36,8 @@ import java.util.Objects; /** + * Deprecated geo query. Deprecated in #64227 (7.12/8.0). We do not plan to remove it, so as + * not to break any users who still rely on it.
* @deprecated use {@link GeoShapeQueryBuilder} */ @Deprecated diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index edf32caaee4ac..cbdc700d1e188 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -8,7 +8,6 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -189,11 +188,7 @@ public InnerHitBuilder(StreamInput in) throws IOException { highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - if (in.readBoolean()) { - fetchFields = in.readCollectionAsList(FieldAndFormat::new); - } - } + fetchFields = in.readOptionalCollectionAsList(FieldAndFormat::new); } @Override @@ -228,13 +223,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalWriteable(highlightBuilder); out.writeOptionalWriteable(innerCollapseBuilder); - - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(fetchFields != null); - if (fetchFields != null) { - out.writeCollection(fetchFields); - } - } + out.writeOptionalCollection(fetchFields); } public String getName() { diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java index da1a760d1414b..139d5c2c2a2e6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -66,9 +66,7 @@ public MatchPhrasePrefixQueryBuilder(StreamInput in) throws IOException { slop = in.readVInt(); maxExpansions = in.readVInt(); analyzer = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - this.zeroTermsQuery = ZeroTermsQueryOption.readFromStream(in); - } + zeroTermsQuery = ZeroTermsQueryOption.readFromStream(in); } @Override @@ -78,9 +76,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(slop); out.writeVInt(maxExpansions); out.writeOptionalString(analyzer); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - zeroTermsQuery.writeTo(out); - } + zeroTermsQuery.writeTo(out); } /** Returns the field name used in this query. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index fcf986191da23..381245ab20974 100644 --- a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -76,9 +76,7 @@ public PrefixQueryBuilder(StreamInput in) throws IOException { fieldName = in.readString(); value = in.readString(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override @@ -86,9 +84,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeString(value); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index 461dc66322434..ff3d63d4c2549 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -88,9 +88,7 @@ public RegexpQueryBuilder(StreamInput in) throws IOException { syntaxFlagsValue = in.readVInt(); maxDeterminizedStates = in.readVInt(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override @@ -100,9 +98,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(syntaxFlagsValue); out.writeVInt(maxDeterminizedStates); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } /** Returns the field name used in this query. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java index 113f66f3e58de..3b2a444e3fc7b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java @@ -89,17 +89,13 @@ public boolean caseInsensitive() { */ public TermQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { super.doWriteTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } public static TermQueryBuilder fromXContent(XContentParser parser) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index 419195e5e5ba5..fed6c3df15587 100644 --- a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -86,9 +86,7 @@ public WildcardQueryBuilder(StreamInput in) throws IOException { fieldName = in.readString(); value = in.readString(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override @@ -96,9 +94,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeString(value); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index ec260a40452b5..5f7d1e1106a16 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1489,7 +1489,7 @@ public void flush(FlushRequest request, ActionListener listener) { } /** - * @return true the shard has a translog. + * @return true if the shard has a translog. If there is no translog, the shard is not writeable. 
*/ public boolean hasTranslog() { return translogConfig.hasTranslog(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 06f9b3e6c8943..89d9a780728fb 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -59,6 +59,7 @@ import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlyVerified; import static org.elasticsearch.common.lucene.Lucene.indexWriterConfigWithNoMerging; import static org.elasticsearch.core.TimeValue.timeValueMillis; @@ -625,7 +626,9 @@ private static void bootstrap(final IndexShard indexShard) throws IOException { try { final var translogLocation = indexShard.shardPath().resolveTranslog(); if (indexShard.hasTranslog() == false) { - Translog.deleteAll(translogLocation); + if (isReadOnlyVerified(indexShard.indexSettings().getIndexMetadata())) { + Translog.deleteAll(translogLocation); + } return; } store.bootstrapNewHistory(); diff --git a/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java b/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java index 0a56db56b2c95..dc5f0fdf03354 100644 --- a/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java +++ b/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java @@ -66,11 +66,7 @@ public IndexingPressureStats(StreamInput in) throws IOException { primaryRejections = in.readVLong(); replicaRejections = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - memoryLimit = in.readVLong(); - } else { - memoryLimit = -1L; - } + memoryLimit = in.readVLong(); // These are not currently propagated across the network yet this.totalCoordinatingOps = 0; @@ -168,9 +164,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(primaryRejections); out.writeVLong(replicaRejections); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeVLong(memoryLimit); - } + out.writeVLong(memoryLimit); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeVLong(primaryDocumentRejections); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 64bbd15198b4b..af28bc3bb32d3 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -38,6 +38,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -1572,6 +1573,7 @@ private IndexWriter newTemporaryEmptyIndexWriter(final Directory dir, final Vers } private IndexWriterConfig newTemporaryIndexWriterConfig() { + assert assertIndexWriter(indexSettings); // this config is only used for temporary IndexWriter instances, used to initialize the index or update the commit data, // so we don't want any merges to happen var iwc = indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD).setCommitOnClose(false); @@ -1581,4 +1583,17 @@ private 
IndexWriterConfig newTemporaryIndexWriterConfig() { } return iwc; } + + private static boolean assertIndexWriter(IndexSettings indexSettings) { + final var version = IndexMetadata.SETTING_INDEX_VERSION_COMPATIBILITY.get(indexSettings.getSettings()); + assert version.onOrAfter(IndexVersions.MINIMUM_COMPATIBLE) + : "index created on version [" + + indexSettings.getIndexVersionCreated() + + "] with compatibility version [" + + version + + "] cannot be written by current version [" + + IndexVersion.current() + + ']'; + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java index 4280faf9fff97..85321c857fa48 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -29,7 +29,6 @@ public class StoreStats implements Writeable, ToXContentFragment { */ public static final long UNKNOWN_RESERVED_BYTES = -1L; - public static final TransportVersion RESERVED_BYTES_VERSION = TransportVersions.V_7_9_0; public static final TransportVersion TOTAL_DATA_SET_SIZE_SIZE_VERSION = TransportVersions.V_7_13_0; private long sizeInBytes; @@ -47,11 +46,7 @@ public StoreStats(StreamInput in) throws IOException { } else { totalDataSetSizeInBytes = sizeInBytes; } - if (in.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - reservedSizeInBytes = in.readZLong(); - } else { - reservedSizeInBytes = UNKNOWN_RESERVED_BYTES; - } + reservedSizeInBytes = in.readZLong(); } /** @@ -115,9 +110,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TOTAL_DATA_SET_SIZE_SIZE_VERSION)) { out.writeVLong(totalDataSetSizeInBytes); } - if (out.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - out.writeZLong(reservedSizeInBytes); - } + out.writeZLong(reservedSizeInBytes); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java index 280e319335b12..8e26443044ecd 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; import java.nio.file.Path; @@ -149,7 +150,10 @@ public boolean fsync() { * translog, the shard is not writeable. 
*/ public boolean hasTranslog() { - // Expect no translog files to exist for searchable snapshots - return false == indexSettings.getIndexMetadata().isSearchableSnapshot(); + var compatibilityVersion = indexSettings.getIndexMetadata().getCompatibilityVersion(); + if (compatibilityVersion.before(IndexVersions.MINIMUM_COMPATIBLE) || indexSettings.getIndexMetadata().isSearchableSnapshot()) { + return false; + } + return true; } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index f22a99cb27faf..0a3baf2c52f57 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -96,6 +97,7 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.NoOpEngine; +import org.elasticsearch.index.engine.ReadOnlyEngine; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; @@ -125,6 +127,7 @@ import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -179,11 +182,15 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyList; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isFullySupportedVersion; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlySupportedVersion; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.elasticsearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION; +import static org.elasticsearch.index.IndexVersions.MINIMUM_COMPATIBLE; +import static org.elasticsearch.index.IndexVersions.MINIMUM_READONLY_COMPATIBLE; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -801,6 +808,22 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) .toList(); if (engineFactories.isEmpty()) { + if (indexMetadata == null || isFullySupportedVersion(indexMetadata, MINIMUM_COMPATIBLE)) { + return new InternalEngineFactory(); + } else if (isReadOnlySupportedVersion(indexMetadata, MINIMUM_COMPATIBLE, MINIMUM_READONLY_COMPATIBLE)) { + return config -> { + return new ReadOnlyEngine( + config, + null, + 
config.getTranslogConfig().hasTranslog() ? null : new TranslogStats(0, 0, 0, 0, 0), + true, + Function.identity(), + true, + true + ); + }; + } + assert false : "unsupported: " + Strings.toString(indexMetadata); return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index b0d33a75ba883..01622ce2f6dae 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -781,23 +780,13 @@ public RecoveryFilesDetails() { RecoveryFilesDetails(StreamInput in) throws IOException { fileDetails = in.readMapValues(FileDetail::new, FileDetail::name); - if (in.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - complete = in.readBoolean(); - } else { - // This flag is used by disk-based allocation to decide whether the remaining bytes measurement is accurate or not; if not - // then it falls back on an estimate. There's only a very short window in which the file details are present but incomplete - // so this is a reasonable approximation, and the stats reported to the disk-based allocator don't hit this code path - // anyway since they always use IndexShard#getRecoveryState which is never transported over the wire. 
- complete = fileDetails.isEmpty() == false; - } + complete = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(values()); - if (out.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - out.writeBoolean(complete); - } + out.writeBoolean(complete); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 362a62c838e3b..e297ddbf03998 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -51,6 +51,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlyVerified; import static org.elasticsearch.core.Strings.format; /** @@ -642,7 +643,9 @@ private static void bootstrap(final IndexShard indexShard, long globalCheckpoint assert localCheckpoint == globalCheckpoint : localCheckpoint + " != " + globalCheckpoint; } } - Translog.deleteAll(translogLocation); + if (isReadOnlyVerified(indexShard.indexSettings().getIndexMetadata())) { + Translog.deleteAll(translogLocation); + } return; } final String translogUUID = Translog.createEmptyTranslog( diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index b6dbc4e9626c3..eba96e15de50c 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNode; @@ -174,8 +175,12 @@ public void clusterChanged(ClusterChangedEvent event) { ); switch (shardDeletionCheckResult) { case FOLDER_FOUND_CAN_DELETE: + var clusterState = event.state(); + var clusterName = clusterState.getClusterName(); + var nodes = clusterState.nodes(); + var clusterStateVersion = clusterState.getVersion(); indicesClusterStateService.onClusterStateShardsClosed( - () -> deleteShardIfExistElseWhere(event.state(), indexShardRoutingTable) + () -> deleteShardIfExistElseWhere(clusterName, nodes, clusterStateVersion, indexShardRoutingTable) ); break; case NO_FOLDER_FOUND: @@ -218,14 +223,18 @@ static boolean shardCanBeDeleted(String localNodeId, IndexShardRoutingTable inde return true; } - private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTable indexShardRoutingTable) { + private void deleteShardIfExistElseWhere( + ClusterName clusterName, + DiscoveryNodes nodes, + long clusterStateVersion, + IndexShardRoutingTable indexShardRoutingTable + ) { List> requests = new ArrayList<>(indexShardRoutingTable.size()); String indexUUID = indexShardRoutingTable.shardId().getIndex().getUUID(); - ClusterName clusterName = state.getClusterName(); for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) { ShardRouting shardRouting = indexShardRoutingTable.shard(copy); assert shardRouting.started() : "expected started shard but was " + 
shardRouting; - DiscoveryNode currentNode = state.nodes().get(shardRouting.currentNodeId()); + DiscoveryNode currentNode = nodes.get(shardRouting.currentNodeId()); requests.add( new Tuple<>(currentNode, new ShardActiveRequest(clusterName, indexUUID, shardRouting.shardId(), deleteShardTimeout)) ); @@ -233,7 +242,7 @@ private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTa ShardActiveResponseHandler responseHandler = new ShardActiveResponseHandler( indexShardRoutingTable.shardId(), - state.getVersion(), + clusterStateVersion, requests.size() ); for (Tuple request : requests) { diff --git a/server/src/main/java/org/elasticsearch/inference/TaskType.java b/server/src/main/java/org/elasticsearch/inference/TaskType.java index f319157828dfa..17e77be43bd1a 100644 --- a/server/src/main/java/org/elasticsearch/inference/TaskType.java +++ b/server/src/main/java/org/elasticsearch/inference/TaskType.java @@ -29,7 +29,8 @@ public enum TaskType implements Writeable { public boolean isAnyOrSame(TaskType other) { return true; } - }; + }, + CHAT_COMPLETION; public static final String NAME = "task_type"; diff --git a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java index 5bf5182af336e..56c08f2b2fb85 100644 --- a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java +++ b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java @@ -13,7 +13,6 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.XContentBuilder; @@ -36,10 +35,6 @@ private EnterpriseGeoIpTask() { } public static final String ENTERPRISE_GEOIP_DOWNLOADER = "enterprise-geoip-downloader"; - public static final NodeFeature GEOIP_DOWNLOADER_DATABASE_CONFIGURATION = new NodeFeature( - "geoip.downloader.database.configuration", - true - ); public static class EnterpriseGeoIpTaskParams implements PersistentTaskParams { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java b/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java deleted file mode 100644 index 7c12b180b4607..0000000000000 --- a/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -import static org.elasticsearch.ingest.EnterpriseGeoIpTask.GEOIP_DOWNLOADER_DATABASE_CONFIGURATION; - -public class IngestGeoIpFeatures implements FeatureSpecification { - - public static final NodeFeature GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE = new NodeFeature( - "get_database_configuration_action.multi_node", - true - ); - - public static final NodeFeature PUT_DATABASE_CONFIGURATION_ACTION_IPINFO = new NodeFeature( - "put_database_configuration_action.ipinfo", - true - ); - - public Set getFeatures() { - return Set.of( - GEOIP_DOWNLOADER_DATABASE_CONFIGURATION, - GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE, - PUT_DATABASE_CONFIGURATION_ACTION_IPINFO - ); - } -} diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 5e9f7bb99b31c..4693b4fcf718a 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1200,7 +1200,6 @@ public Map searchFields() { DataStreamAutoShardingService dataStreamAutoShardingService = new DataStreamAutoShardingService( settings, clusterService, - featureService, threadPool::absoluteTimeInMillis ); dataStreamAutoShardingService.init(); @@ -1351,7 +1350,7 @@ private Module loadDiagnosticServices( var serverHealthIndicatorServices = Stream.of( new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), - new RepositoryIntegrityHealthIndicatorService(clusterService, featureService), + new RepositoryIntegrityHealthIndicatorService(clusterService), new DiskHealthIndicatorService(clusterService, featureService), new ShardsCapacityHealthIndicatorService(clusterService, featureService), fileSettingsHealthIndicatorService @@ -1376,14 +1375,7 @@ private Module loadDiagnosticServices( new DiskHealthTracker(nodeService, clusterService), new RepositoriesHealthTracker(repositoriesService) ); - LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create( - settings, - clusterService, - threadPool, - client, - featureService, - healthTrackers - ); + LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(settings, clusterService, threadPool, client, healthTrackers); HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService); return b -> { diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index de56ead9b5aba..1a169699d4131 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -22,9 +22,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; -import org.elasticsearch.reservedstate.service.FileSettingsFeatures; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.shutdown.PluginShutdownService; import org.elasticsearch.transport.BindTransportException; @@ -280,22 +278,7 @@ private boolean isMasterElected(ClusterState clusterState) { // protected to allow mock service to override protected 
boolean areFileSettingsApplied(ClusterState clusterState) { ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (fileSettingsMetadata == null) { - // In order to block readiness on file settings being applied, we need to know that the master node has written an initial - // version, or a marker that file settings don't exist. When upgrading from a version that did not have file settings, the - // current master node may not be the first node upgraded. To be safe, we wait to consider file settings application for - // readiness until the whole cluster supports file settings. Note that this only applies when no reserved state metadata - // exists, so either we are starting up a current cluster (and the feature will be found) or we are upgrading from - // a version before file settings existed (before 8.4). - return supportsFileSettings(clusterState) == false; - } else { - return fileSettingsMetadata.version().equals(ReservedStateMetadata.NO_VERSION) == false; - } - } - - @SuppressForbidden(reason = "need to check file settings support on exact cluster state") - private boolean supportsFileSettings(ClusterState clusterState) { - return clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), FileSettingsFeatures.FILE_SETTINGS_SUPPORTED); + return fileSettingsMetadata != null && fileSettingsMetadata.version().equals(ReservedStateMetadata.NO_VERSION) == false; } private void setReady(boolean ready) { diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java deleted file mode 100644 index b6dea6a2003fc..0000000000000 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.repositories; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class RepositoriesFeatures implements FeatureSpecification { - public static final NodeFeature SUPPORTS_REPOSITORIES_USAGE_STATS = new NodeFeature("repositories.supports_usage_stats", true); - - @Override - public Set getFeatures() { - return Set.of(SUPPORTS_REPOSITORIES_USAGE_STATS); - } -} diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsFeatures.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsFeatures.java deleted file mode 100644 index a60f525be988a..0000000000000 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsFeatures.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.reservedstate.service; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class FileSettingsFeatures implements FeatureSpecification { - - // Although file settings were supported starting in 8.4.0, this is really about whether file settings - // are used in readiness. - public static final NodeFeature FILE_SETTINGS_SUPPORTED = new NodeFeature("file_settings", true); - - @Override - public Set getFeatures() { - return Set.of(FILE_SETTINGS_SUPPORTED); - } -} diff --git a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java b/server/src/main/java/org/elasticsearch/rest/RestFeatures.java deleted file mode 100644 index e72b30526c8e3..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.rest; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction; - -import java.util.Set; - -import static org.elasticsearch.search.fetch.subphase.highlight.DefaultHighlighter.UNIFIED_HIGHLIGHTER_MATCHED_FIELDS; - -public class RestFeatures implements FeatureSpecification { - @Override - public Set getFeatures() { - return Set.of( - RestNodesCapabilitiesAction.CAPABILITIES_ACTION, - RestNodesCapabilitiesAction.LOCAL_ONLY_CAPABILITIES, - UNIFIED_HIGHLIGHTER_MATCHED_FIELDS - ); - } -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java index 265cdd5979adf..7d660e527a814 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.cluster.node.capabilities.NodesCapabilitiesRequest; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -30,8 +29,6 @@ @ServerlessScope(Scope.INTERNAL) public class RestNodesCapabilitiesAction extends BaseRestHandler { - public static final NodeFeature CAPABILITIES_ACTION = new NodeFeature("rest.capabilities_action", true); - public static final NodeFeature LOCAL_ONLY_CAPABILITIES = new NodeFeature("rest.local_only_capabilities", true); private static final 
Set SUPPORTED_QUERY_PARAMETERS = Set.of( "timeout", "method", diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index ec8bb6285bdd4..da7a7d3379ee0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -14,16 +14,13 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.List; -import java.util.function.Predicate; import static org.elasticsearch.rest.RestUtils.getAckTimeout; import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @@ -33,11 +30,6 @@ public class RestUpdateDesiredNodesAction extends BaseRestHandler { private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestUpdateDesiredNodesAction.class); private static final String VERSION_DEPRECATION_MESSAGE = "[version removal] Specifying node_version in desired nodes requests is deprecated."; - private final Predicate clusterSupportsFeature; - - public RestUpdateDesiredNodesAction(Predicate clusterSupportsFeature) { - this.clusterSupportsFeature = clusterSupportsFeature; - } @Override public String getName() { @@ -67,14 +59,8 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli ); } - if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { - deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); - } - } else { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) { - throw new XContentParseException("[node_version] field is required and must have a valid value"); - } + if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { + deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); } return restChannel -> client.execute( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index 9be3462e97e0c..e49efc80250ac 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -48,11 +48,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final GetIndexRequest getIndexRequest = new GetIndexRequest(); + final GetIndexRequest getIndexRequest = new GetIndexRequest(getMasterNodeTimeout(request)); getIndexRequest.indices(indices); 
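A minimal usage sketch of the GetIndexRequest change in the hunk above, assuming the org.elasticsearch.action.admin.indices.get.GetIndexRequest and org.elasticsearch.core.TimeValue types that appear elsewhere in this diff; the index name is illustrative. The master-node timeout now travels through the constructor instead of a masterNodeTimeout(...) setter:

import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.core.TimeValue;

class GetIndexRequestSketch {
    static GetIndexRequest buildRequest() {
        // The timeout is a constructor argument now; the remaining setters still chain,
        // as the test hunks later in this diff show.
        return new GetIndexRequest(TimeValue.timeValueSeconds(30))
            .indices("my-index")
            .includeDefaults(true);
    }
}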
getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); getIndexRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); getIndexRequest.features(GetIndexRequest.Feature.fromRequest(request)); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java b/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java index 88756ddbf4ed5..cd781e42379d1 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java @@ -17,6 +17,6 @@ public final class ScriptFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(VectorScoreScriptUtils.HAMMING_DISTANCE_FUNCTION, ScriptTermStats.TERM_STAT_FEATURE); + return Set.of(); } } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java b/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java index 82f6e972e1266..9c51afce3a49d 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java @@ -16,7 +16,6 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.util.CachedSupplier; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.search.internal.ContextIndexSearcher; import java.io.IOException; @@ -30,8 +29,6 @@ */ public class ScriptTermStats { - public static final NodeFeature TERM_STAT_FEATURE = new NodeFeature("script.term_stats", true); - private final IntSupplier docIdSupplier; private final Term[] terms; private final IndexSearcher searcher; diff --git a/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java index 9b4d105eea100..bdebdcc1eecb4 100644 --- a/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java @@ -10,7 +10,6 @@ package org.elasticsearch.script; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.script.field.vectors.DenseVector; import org.elasticsearch.script.field.vectors.DenseVectorDocValuesField; @@ -21,8 +20,6 @@ public class VectorScoreScriptUtils { - public static final NodeFeature HAMMING_DISTANCE_FUNCTION = new NodeFeature("script.hamming", true); - public static class DenseVectorFunction { protected final ScoreScript scoreScript; protected final DenseVectorDocValuesField field; diff --git a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java index 553511346b182..98dd7f9388c1f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java +++ b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java @@ -11,7 +11,6 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import java.util.Set; @@ -21,7 +20,7 @@ public final class 
SearchFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(KnnVectorQueryBuilder.K_PARAM_SUPPORTED, LUCENE_10_0_0_UPGRADE); + return Set.of(LUCENE_10_0_0_UPGRADE); } public static final NodeFeature RETRIEVER_RESCORER_ENABLED = new NodeFeature("search.retriever.rescorer.enabled"); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index d983dd1ff78d4..6716c03a3a935 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -1157,6 +1157,10 @@ private void registerQueryParsers(List plugins) { ); registerQuery( new QuerySpec<>( + /* + * Deprecated in #64227, 7.12/8.0. We do not plan to remove this so we + * do not break any users using this. + */ (new ParseField(GeoPolygonQueryBuilder.NAME).withAllDeprecated(GeoPolygonQueryBuilder.GEO_POLYGON_DEPRECATION_MSG)), GeoPolygonQueryBuilder::new, GeoPolygonQueryBuilder::fromXContent diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java index 915607efd04cc..06aaf9d365e0a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java @@ -235,6 +235,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index cfd8dd78595f3..d20f768bedb43 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -48,6 +48,11 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde public static final String NAME = "geo_distance"; public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(NAME, GeoDistanceAggregatorSupplier.class); + /** + * The point from which to measure the distance. This has many other names that have been + * deprecated since 2014, but we have no plans to remove these names so we don't break anyone + * using them. 
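As context for the all-deprecated and multi-name ParseField declarations in this region, a hedged sketch of how ParseField aliasing behaves, assuming the org.elasticsearch.xcontent ParseField/DeprecationHandler API: every listed name matches, and only the first is the preferred, non-deprecated one.

import org.elasticsearch.xcontent.DeprecationHandler;
import org.elasticsearch.xcontent.ParseField;

class ParseFieldAliasSketch {
    // Mirrors ORIGIN_FIELD above: "origin" is preferred, the rest are deprecated aliases.
    static final ParseField ORIGIN = new ParseField("origin", "center", "point", "por");

    public static void main(String[] args) {
        // Preferred and deprecated names both match; unknown names do not.
        System.out.println(ORIGIN.match("origin", DeprecationHandler.IGNORE_DEPRECATIONS)); // true
        System.out.println(ORIGIN.match("center", DeprecationHandler.IGNORE_DEPRECATIONS)); // true
        System.out.println(ORIGIN.match("radius", DeprecationHandler.IGNORE_DEPRECATIONS)); // false
    }
}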
+ */ static final ParseField ORIGIN_FIELD = new ParseField("origin", "center", "point", "por"); static final ParseField UNIT_FIELD = new ParseField("unit"); static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 2720ffdb5f7d4..560eb61c7d7a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -37,6 +37,12 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregation public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(NAME, CardinalityAggregatorSupplier.class); + /** + * Pre-2.0 rehashing was configurable, but it hasn't been for ~10 years. We always rehash because it's + * quite cheap. Attempting to enable or disable it is just a noop with a deprecation message. We have + * no plans to remove this parameter because it isn't worth breaking even the tiny fraction of users + * who are sending it. Deprecation was in #12931. + */ private static final ParseField REHASH = new ParseField("rehash").withAllDeprecated("no replacement - values will always be rehashed"); public static final ParseField PRECISION_THRESHOLD_FIELD = new ParseField("precision_threshold"); public static final ParseField EXECUTION_HINT_FIELD_NAME = new ParseField("execution_hint"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java index 7c937acdd92dc..f62e3394abd94 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java @@ -23,6 +23,10 @@ public enum PercentilesMethod implements Writeable { /** * The TDigest method for calculating percentiles + *
* <p> + * The {@code TDigest} and {@code TDIGEST} names have been deprecated since 8.0, + * but we don't have any plans to remove them so we don't break anyone using them. + * </p>
*/ TDIGEST("tdigest", "TDigest", "TDIGEST") { @Override @@ -32,6 +36,10 @@ PercentilesConfig configFromStream(StreamInput in) throws IOException { }, /** * The HDRHistogram method of calculating percentiles + *
<p> + * The {@code HDR} name has been deprecated since 8.0, but we don't have any plans + * to remove it so we don't break anyone using it. + * </p>
*/ HDR("hdr", "HDR") { @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index 7c4def5aa2743..103a23bf8ef9a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -52,6 +52,11 @@ public enum ValueType implements Writeable { private final byte id; private final String preferredName; + /** + * Name of the {@code value_type} field in the JSON. The name {@code valueType} has + * been deprecated since before #22160, but we have no plans to remove it so we don't + * break anyone that might be using it. + */ public static final ParseField VALUE_TYPE = new ParseField("value_type", "valueType"); ValueType(byte id, String description, String preferredName, ValuesSourceType valuesSourceType, DocValueFormat defaultFormat) { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 8c21abe4180ea..6d47493e4d063 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -1402,9 +1402,6 @@ private SearchSourceBuilder parseXContent( } } else if (token == XContentParser.Token.START_OBJECT) { if (RETRIEVER.match(currentFieldName, parser.getDeprecationHandler())) { - if (clusterSupportsFeature.test(RetrieverBuilder.RETRIEVERS_SUPPORTED) == false) { - throw new ParsingException(parser.getTokenLocation(), "Unknown key for a START_OBJECT in [retriever]."); - } retrieverBuilder = RetrieverBuilder.parseTopLevelRetrieverBuilder( parser, new RetrieverParserContext(searchUsage, clusterSupportsFeature) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java index b16617e2eb4d9..c47f815c18639 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.text.Text; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -51,8 +50,6 @@ public class DefaultHighlighter implements Highlighter { - public static final NodeFeature UNIFIED_HIGHLIGHTER_MATCHED_FIELDS = new NodeFeature("unified_highlighter_matched_fields", true); - @Override public boolean canHighlight(MappedFieldType fieldType) { return true; diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index ecc03d05b28a6..737d2aa397c34 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -10,8 +10,6 @@ package org.elasticsearch.search.retriever; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.common.ParsingException; -import 
org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; @@ -46,7 +44,6 @@ public final class KnnRetrieverBuilder extends RetrieverBuilder { public static final String NAME = "knn"; - public static final NodeFeature KNN_RETRIEVER_SUPPORTED = new NodeFeature("knn_retriever_supported", true); public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField K_FIELD = new ParseField("k"); @@ -103,9 +100,6 @@ public final class KnnRetrieverBuilder extends RetrieverBuilder { } public static KnnRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { - if (context.clusterSupportsFeature(KNN_RETRIEVER_SUPPORTED) == false) { - throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); - } return PARSER.apply(parser, context); } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index 357555cc59942..ce852a44c28ec 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; @@ -50,8 +49,6 @@ */ public abstract class RetrieverBuilder implements Rewriteable, ToXContent { - public static final NodeFeature RETRIEVERS_SUPPORTED = new NodeFeature("retrievers_supported", true); - public static final ParseField PRE_FILTER_FIELD = new ParseField("filter"); public static final ParseField MIN_SCORE_FIELD = new ParseField("min_score"); diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java index 74a8b30c8e7dc..bfd6f572a9e65 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java @@ -22,10 +22,6 @@ public class RetrieversFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of( - RetrieverBuilder.RETRIEVERS_SUPPORTED, - StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED, - KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED - ); + return Set.of(); } } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java index 2ffb9e3a98028..3ca74dc133d47 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java @@ -9,8 +9,6 @@ package org.elasticsearch.search.retriever; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.BoolQueryBuilder; import 
org.elasticsearch.index.query.QueryBuilder; @@ -39,7 +37,6 @@ public final class StandardRetrieverBuilder extends RetrieverBuilder implements ToXContent { public static final String NAME = "standard"; - public static final NodeFeature STANDARD_RETRIEVER_SUPPORTED = new NodeFeature("standard_retriever_supported", true); public static final ParseField QUERY_FIELD = new ParseField("query"); public static final ParseField SEARCH_AFTER_FIELD = new ParseField("search_after"); @@ -81,9 +78,6 @@ public final class StandardRetrieverBuilder extends RetrieverBuilder implements } public static StandardRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { - if (context.clusterSupportsFeature(STANDARD_RETRIEVER_SUPPORTED) == false) { - throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); - } return PARSER.apply(parser, context); } diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 2aaade35fb8f3..0bd8cc44edb30 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -74,6 +74,11 @@ public class GeoDistanceSortBuilder extends SortBuilder private static final ParseField UNIT_FIELD = new ParseField("unit"); private static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); private static final ParseField VALIDATION_METHOD_FIELD = new ParseField("validation_method"); + /** + * Name for the sort {@link SortMode} which is mostly about sorting on multivalued fields. + * The {@code sort_mode} name has been deprecated since 5.0, but we don't plan to remove + * this so we don't break anyone using this. + */ private static final ParseField SORTMODE_FIELD = new ParseField("mode", "sort_mode"); private static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped"); diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 193191658af08..565fd7325a5ac 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; @@ -56,8 +55,6 @@ * {@link org.apache.lucene.search.KnnByteVectorQuery}. 
*/ public class KnnVectorQueryBuilder extends AbstractQueryBuilder { - public static final NodeFeature K_PARAM_SUPPORTED = new NodeFeature("search.vectors.k_param_supported", true); - public static final String NAME = "knn"; private static final int NUM_CANDS_LIMIT = 10_000; private static final float NUM_CANDS_MULTIPLICATIVE_FACTOR = 1.5f; diff --git a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java index 4be1ee9ddc513..a6a7b458c216d 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java @@ -13,9 +13,7 @@ import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; @@ -100,11 +98,9 @@ public class RepositoryIntegrityHealthIndicatorService implements HealthIndicato ); private final ClusterService clusterService; - private final FeatureService featureService; - public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService, FeatureService featureService) { + public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService) { this.clusterService = clusterService; - this.featureService = featureService; } @Override @@ -175,15 +171,8 @@ private RepositoryHealthAnalyzer( || invalidRepositories.isEmpty() == false) { healthStatus = YELLOW; } else if (repositoriesHealthByNode.isEmpty()) { - clusterHasFeature = featureService.clusterHasFeature( - clusterState, - HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR - ) == false; - if (clusterHasFeature) { - healthStatus = GREEN; - } else { - healthStatus = UNKNOWN; - } + clusterHasFeature = false; + healthStatus = UNKNOWN; } else { healthStatus = GREEN; } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 00146844043b0..faafdf7d71e33 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -114,7 +114,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final NetworkService networkService; protected final Set profileSettingsSet; protected final boolean rstOnClose; - private final TransportVersion version; private final CircuitBreakerService circuitBreakerService; private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); @@ -148,7 +147,6 @@ public TcpTransport( ) { this.settings = settings; this.profileSettingsSet = getProfileSettings(settings); - this.version = version; this.threadPool = threadPool; this.circuitBreakerService = circuitBreakerService; this.networkService = networkService; @@ -199,11 +197,6 @@ public TcpTransport( ); } - @Override - public TransportVersion getVersion() { - return version; - } - public StatsTracker getStatsTracker() { return statsTracker; } diff --git 
a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index a477635835746..a1d35ce3f255a 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -50,10 +50,6 @@ default boolean isSecure() { return false; } - default TransportVersion getVersion() { - return TransportVersion.current(); - } - /** * The address the transport is bound on. */ diff --git a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java index ec9b30e71e62c..c4fdf05f5640c 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java @@ -121,12 +121,12 @@ final class TransportHandshaker { * [3] Parent task ID should be empty; see org.elasticsearch.tasks.TaskId.writeTo for its structure. */ - static final TransportVersion EARLIEST_HANDSHAKE_VERSION = TransportVersion.fromId(6080099); - static final TransportVersion REQUEST_HANDSHAKE_VERSION = TransportVersions.MINIMUM_COMPATIBLE; + static final TransportVersion V7_HANDSHAKE_VERSION = TransportVersion.fromId(6_08_00_99); + static final TransportVersion V8_HANDSHAKE_VERSION = TransportVersion.fromId(7_17_00_99); static final TransportVersion V9_HANDSHAKE_VERSION = TransportVersion.fromId(8_800_00_0); static final Set ALLOWED_HANDSHAKE_VERSIONS = Set.of( - EARLIEST_HANDSHAKE_VERSION, - REQUEST_HANDSHAKE_VERSION, + V7_HANDSHAKE_VERSION, + V8_HANDSHAKE_VERSION, V9_HANDSHAKE_VERSION ); @@ -166,7 +166,7 @@ void sendHandshake( ); boolean success = false; try { - handshakeRequestSender.sendRequest(node, channel, requestId, REQUEST_HANDSHAKE_VERSION); + handshakeRequestSender.sendRequest(node, channel, requestId, V8_HANDSHAKE_VERSION); threadPool.schedule( () -> handler.handleLocalException(new ConnectTransportException(node, "handshake_timeout[" + timeout + "]")), diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index 8fa188efbd4a3..a4f606b54827b 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -7,21 +7,13 @@ # License v3.0 only", or the "Server Side Public License, v 1". 
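A self-contained sketch of the handshake-version gate rewritten in the TransportHandshaker hunk above, with plain ints standing in for TransportVersion (these are the same literals; Java underscores are only digit separators, so 6_08_00_99 == 6080099):

import java.util.Set;

class HandshakeVersionSketch {
    static final int V7_HANDSHAKE_ID = 6_08_00_99;
    static final int V8_HANDSHAKE_ID = 7_17_00_99;
    static final int V9_HANDSHAKE_ID = 8_800_00_0;

    // A remote node must open the handshake with one of these fixed sentinel versions.
    static final Set<Integer> ALLOWED = Set.of(V7_HANDSHAKE_ID, V8_HANDSHAKE_ID, V9_HANDSHAKE_ID);

    static boolean accepts(int remoteHandshakeId) {
        return ALLOWED.contains(remoteHandshakeId);
    }

    public static void main(String[] args) {
        System.out.println(accepts(7_17_00_99)); // true: the fixed v8 handshake version
        System.out.println(accepts(7_17_00_50)); // false: not a handshake sentinel
    }
}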
# -org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures org.elasticsearch.action.bulk.BulkFeatures org.elasticsearch.features.FeatureInfrastructureFeatures -org.elasticsearch.health.HealthFeatures -org.elasticsearch.cluster.metadata.MetadataFeatures -org.elasticsearch.rest.RestFeatures -org.elasticsearch.repositories.RepositoriesFeatures -org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures org.elasticsearch.index.IndexFeatures org.elasticsearch.index.mapper.MapperFeatures -org.elasticsearch.ingest.IngestGeoIpFeatures org.elasticsearch.search.SearchFeatures org.elasticsearch.search.retriever.RetrieversFeatures org.elasticsearch.script.ScriptFeatures -org.elasticsearch.reservedstate.service.FileSettingsFeatures org.elasticsearch.cluster.routing.RoutingFeatures org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java index f3d8f8860ba83..d9bf3e0e99c81 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsTests; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ClusterServiceUtils; @@ -35,8 +34,6 @@ import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.not; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; @@ -48,7 +45,6 @@ public class TransportGetAllocationStatsActionTests extends ESTestCase { private ClusterService clusterService; private TransportService transportService; private AllocationStatsService allocationStatsService; - private FeatureService featureService; private TransportGetAllocationStatsAction action; @@ -67,15 +63,13 @@ public void setUp() throws Exception { Set.of() ); allocationStatsService = mock(AllocationStatsService.class); - featureService = mock(FeatureService.class); action = new TransportGetAllocationStatsAction( transportService, clusterService, threadPool, new ActionFilters(Set.of()), null, - allocationStatsService, - featureService + allocationStatsService ); } @@ -99,8 +93,6 @@ public void testReturnsOnlyRequestedStats() throws Exception { ); when(allocationStatsService.stats()).thenReturn(Map.of(randomIdentifier(), NodeAllocationStatsTests.randomNodeAllocationStats())); - when(featureService.clusterHasFeature(any(ClusterState.class), eq(AllocationStatsFeatures.INCLUDE_DISK_THRESHOLD_SETTINGS))) - .thenReturn(true); var future = new PlainActionFuture(); action.masterOperation(mock(Task.class), request, ClusterState.EMPTY_STATE, future); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java index 
32269766011cb..e36cbd4fe93a3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java @@ -80,7 +80,7 @@ public void tearDown() throws Exception { } public void testIncludeDefaults() { - GetIndexRequest defaultsRequest = new GetIndexRequest().indices(indexName).includeDefaults(true); + GetIndexRequest defaultsRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(indexName).includeDefaults(true); ActionTestUtils.execute( getIndexAction, null, @@ -95,7 +95,7 @@ public void testIncludeDefaults() { } public void testDoNotIncludeDefaults() { - GetIndexRequest noDefaultsRequest = new GetIndexRequest().indices(indexName); + GetIndexRequest noDefaultsRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(indexName); ActionTestUtils.execute( getIndexAction, null, diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java index 35a7e4d655f57..ec163e34ce0a1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -75,7 +75,7 @@ public void testInvalidFeatures() { } public void testIndicesOptions() { - GetIndexRequest getIndexRequest = new GetIndexRequest(); + GetIndexRequest getIndexRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT); assertThat( getIndexRequest.indicesOptions().concreteTargetOptions(), equalTo(IndicesOptions.strictExpandOpen().concreteTargetOptions()) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index f04180bde30f2..657a03066ada9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.cache.query.QueryCacheStats; @@ -121,7 +120,6 @@ public class TransportRolloverActionTests extends ESTestCase { final DataStreamAutoShardingService dataStreamAutoShardingService = new DataStreamAutoShardingService( Settings.EMPTY, mockClusterService, - new FeatureService(List.of()), System::currentTimeMillis ); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 50885fc399c89..c6f923ce7cc03 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -153,7 +153,6 @@ class TestTransportBulkAction extends TransportBulkAction { transportService, TransportBulkActionIngestTests.this.clusterService, ingestService, - mockFeatureService, new NodeClient(Settings.EMPTY, TransportBulkActionIngestTests.this.threadPool), new 
ActionFilters(Collections.emptySet()), TestIndexNameExpressionResolver.newInstance(), diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 587bc2e3ba333..616488d3472ff 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -113,7 +113,6 @@ class TestTransportBulkAction extends TransportBulkAction { transportService, TransportBulkActionTests.this.clusterService, null, - mockFeatureService, new NodeClient(Settings.EMPTY, TransportBulkActionTests.this.threadPool), new ActionFilters(Collections.emptySet()), new Resolver(), diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 2f033e4b5a383..544a373675ee5 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -247,7 +247,6 @@ static class TestTransportBulkAction extends TransportBulkAction { transportService, clusterService, null, - null, client, actionFilters, indexNameExpressionResolver, diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java index 3a43a1df9bf88..14ecf85b3aa7e 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -27,9 +27,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; @@ -81,12 +78,6 @@ public void setupService() { service = new DataStreamAutoShardingService( Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(), clusterService, - new FeatureService(List.of(new FeatureSpecification() { - @Override - public Set getFeatures() { - return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE); - } - })), () -> now ); dataStreamName = randomAlphaOfLengthBetween(10, 100); @@ -113,14 +104,6 @@ public void testCalculateValidations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -129,12 +112,6 @@ public void testCalculateValidations() { DataStreamAutoShardingService disabledAutoshardingService = new DataStreamAutoShardingService( Settings.EMPTY, clusterService, - new FeatureService(List.of(new FeatureSpecification() { - @Override - 
public Set getFeatures() { - return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE); - } - })), System::currentTimeMillis ); @@ -142,46 +119,6 @@ public Set getFeatures() { assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); } - { - // cluster doesn't have feature - ClusterState stateNoFeature = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures(Map.of("n1", Set.of(), "n2", Set.of())) - .metadata(Metadata.builder()) - .build(); - - Settings settings = Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(); - DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( - settings, - clusterService, - new FeatureService(List.of()), - () -> now - ); - - AutoShardingResult autoShardingResult = noFeatureService.calculate(stateNoFeature, dataStream, 2.0); - assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); - } - - { - Settings settings = Settings.builder() - .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) - .putList( - DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), - List.of("foo", dataStreamName + "*") - ) - .build(); - // patterns are configured to exclude the current data stream - DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( - settings, - clusterService, - new FeatureService(List.of()), - () -> now - ); - - AutoShardingResult autoShardingResult = noFeatureService.calculate(state, dataStream, 2.0); - assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); - } - { // null write load passed AutoShardingResult autoShardingResult = service.calculate(state, dataStream, null); @@ -209,14 +146,6 @@ public void testCalculateIncreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -248,14 +177,6 @@ public void testCalculateIncreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -287,14 +208,6 @@ public void testCalculateIncreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -326,14 +239,6 @@ public void testCalculateDecreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) 
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -367,14 +272,6 @@ public void testCalculateDecreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -416,14 +313,6 @@ public void testCalculateDecreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -463,14 +352,6 @@ public void testCalculateDecreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); @@ -504,14 +385,6 @@ public void testCalculateDecreaseShardingRecommendations() { builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) - .nodeFeatures( - Map.of( - "n1", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), - "n2", - Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) - ) - ) .metadata(builder) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 71bf2a47cfa47..47ff4ca6f0600 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -207,12 +207,7 @@ public void testOnPhaseFailure() { List> nodeLookups = new ArrayList<>(); ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, 0); AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); - action.onPhaseFailure(new SearchPhase("test") { - @Override - public void run() { - - } - }, "message", null); + action.onPhaseFailure("test", "message", null); assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException) exception.get(); assertEquals("message", searchPhaseExecutionException.getMessage()); diff 
--git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index 193855a4c835f..bf62973b9b052 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -141,7 +141,7 @@ public void sendExecuteQuery( ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override - public void run() throws IOException { + protected void run() { responseRef.set(((QueryPhaseResultConsumer) response).results); } }, mockSearchPhaseContext); @@ -227,7 +227,7 @@ public void sendExecuteQuery( ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override - public void run() throws IOException { + protected void run() { responseRef.set(((QueryPhaseResultConsumer) response).results); } }, mockSearchPhaseContext); @@ -315,7 +315,7 @@ public void sendExecuteQuery( ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override - public void run() throws IOException { + protected void run() { responseRef.set(((QueryPhaseResultConsumer) response).results); } }, mockSearchPhaseContext); diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 5fb70500d515f..65fdec96c92f0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -119,7 +119,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL try { ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() { + protected void run() { try (var sections = new SearchResponseSections(hits, null, null, false, null, null, 1)) { mockSearchPhaseContext.sendSearchResponse(sections, null); } @@ -210,7 +210,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL try { ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() { + protected void run() { try (var sections = new SearchResponseSections(hits, null, null, false, null, null, 1)) { mockSearchPhaseContext.sendSearchResponse(sections, null); } @@ -246,7 +246,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL try { ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() { + protected void run() { try (var sections = new SearchResponseSections(hits, null, null, false, null, null, 1)) { mockSearchPhaseContext.sendSearchResponse(sections, null); } @@ -286,7 +286,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHits hits = SearchHits.empty(new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() { + protected void run() { mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); @@ 
-336,7 +336,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL try { ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() { + protected void run() { mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); @@ -385,7 +385,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL try { ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() { + protected void run() { mockSearchPhaseContext.sendSearchResponse( new SearchResponseSections(hits, null, null, false, null, null, 1), new AtomicArray<>(0) diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index dda20dfb37e9d..fd60621c7e400 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -488,7 +488,7 @@ public void sendExecuteFetch( reducedQueryPhase, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() { + protected void run() { mockSearchPhaseContext.sendSearchResponse(searchResponse, null); latch.countDown(); } @@ -764,7 +764,7 @@ private static BiFunction ) { return (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() { + protected void run() { mockSearchPhaseContext.sendSearchResponse(searchResponse, null); } }; diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index cf65d756811ad..e8e12300c23e3 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -113,7 +113,7 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At } @Override - public void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + public void onPhaseFailure(String phase, String msg, Throwable cause) { phaseFailure.set(cause); } @@ -135,12 +135,12 @@ public SearchTransportService getSearchTransport() { } @Override - public void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier) { + public void executeNextPhase(String currentPhase, Supplier nextPhaseSupplier) { var nextPhase = nextPhaseSupplier.get(); try { nextPhase.run(); } catch (Exception e) { - onPhaseFailure(nextPhase, "phase failed", e); + onPhaseFailure(nextPhase.getName(), "phase failed", e); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 2361beb7ad036..7e9e6f623cab0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -140,7 +140,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); latch.countDown(); } @@ -255,7 +255,7 @@ protected void executePhaseOnShard( 
protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); latch.countDown(); } @@ -363,7 +363,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { for (int i = 0; i < results.getNumShards(); i++) { TestSearchPhaseResult result = results.getAtomicArray().get(i); assertEquals(result.node.getId(), result.getSearchShardTarget().getNodeId()); @@ -497,7 +497,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { throw new RuntimeException("boom"); } }; @@ -608,7 +608,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); latch.countDown(); } @@ -688,7 +688,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); latch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 6357155793fdf..f005f862720ff 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -207,7 +207,7 @@ public void sendExecuteQuery( protected SearchPhase getNextPhase() { return new SearchPhase("test") { @Override - public void run() { + protected void run() { latch.countDown(); } }; diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java index 8e4b8db77bb17..1952e9c26c570 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -86,7 +85,7 @@ protected SearchPhase moveToNextPhase(BiFunction assertEquals(1, movedCounter.incrementAndGet()); return new SearchPhase("test") { @Override - public void run() throws IOException { + protected void run() { latch.countDown(); } }; @@ -183,7 +182,7 @@ protected SearchPhase moveToNextPhase(BiFunction assertEquals(1, movedCounter.incrementAndGet()); return new SearchPhase("TEST_PHASE") { @Override - public void run() throws IOException { + protected void run() { throw new IllegalArgumentException("BOOM"); } }; @@ -261,7 +260,7 @@ protected SearchPhase moveToNextPhase(BiFunction assertEquals(1, movedCounter.incrementAndGet()); return new SearchPhase("test") { @Override - public void run() throws IOException { + protected void run() { latch.countDown(); } }; @@ -343,7 +342,7 @@ protected SearchPhase moveToNextPhase(BiFunction assertEquals(1, movedCounter.incrementAndGet()); return new SearchPhase("test") { 
@Override - public void run() throws IOException { + protected void run() { latch.countDown(); } }; diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 270315f23a53c..cc381d047fdfb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.DesiredNodesTestCase; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -64,6 +65,7 @@ import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.elasticsearch.test.index.IndexVersionUtils.getPreviousVersion; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -228,17 +230,66 @@ public void testJoinClusterWithReadOnlyCompatibleIndices() { ) ); } + var indexCreated = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + getPreviousVersion(IndexVersions.MINIMUM_COMPATIBLE) + ); { - var indexMetadata = IndexMetadata.builder("read-only-compatible-but-unsupported") + var indexMetadata = IndexMetadata.builder("regular") .settings( Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.MINIMUM_READONLY_COMPATIBLE) .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .build() ) .numberOfShards(1) .numberOfReplicas(1) .build(); + NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), randomBoolean()); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, false); + } + var indexMetadata = IndexMetadata.builder("regular").settings(settings).numberOfShards(1).numberOfReplicas(1).build(); + + expectThrows( + IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ) + ); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), false); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()); + } + + var indexMetadata = IndexMetadata.builder("regular-not-read-only-verified") + .settings(settings) + .numberOfShards(1) + 
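Condensing the join-executor cases above and below: an index created in the read-only-compatible range may only join the cluster when it carries both a write block and the verified read-only marker. A sketch of the accepted configuration, using the same constants as these tests:

    Settings readOnlySettings = Settings.builder()
        // created in [MINIMUM_READONLY_COMPATIBLE, MINIMUM_COMPATIBLE)
        .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated)
        // the write block is required...
        .put(IndexMetadata.SETTING_BLOCKS_WRITE, true)
        // ...and the index must have been verified as read-only
        .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true)
        .build();
    // Passes; dropping either marker makes ensureIndexCompatibility throw IllegalStateException.
    NodeJoinExecutor.ensureIndexCompatibility(
        IndexVersions.MINIMUM_COMPATIBLE,
        IndexVersions.MINIMUM_READONLY_COMPATIBLE,
        IndexVersion.current(),
        Metadata.builder()
            .put(IndexMetadata.builder("regular").settings(readOnlySettings).numberOfShards(1).numberOfReplicas(1).build(), false)
            .build()
    );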
.numberOfReplicas(1) + .build(); + expectThrows( IllegalStateException.class, () -> NodeJoinExecutor.ensureIndexCompatibility( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 417ae89da0a69..44ce491b1e51e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -25,6 +25,8 @@ import java.util.Collections; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.elasticsearch.test.index.IndexVersionUtils.getPreviousVersion; +import static org.elasticsearch.test.index.IndexVersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; public class IndexMetadataVerifierTests extends ESTestCase { @@ -105,7 +107,7 @@ public void testCustomSimilarity() { public void testIncompatibleVersion() { IndexMetadataVerifier service = getIndexMetadataVerifier(); - IndexVersion minCompat = IndexVersions.MINIMUM_COMPATIBLE; + IndexVersion minCompat = IndexVersions.MINIMUM_READONLY_COMPATIBLE; IndexVersion indexCreated = IndexVersion.fromId(randomIntBetween(1000099, minCompat.id() - 1)); final IndexMetadata metadata = newIndexMeta( "foo", @@ -124,7 +126,7 @@ public void testIncompatibleVersion() { + indexCreated.toReleaseVersion() + "] " + "but the minimum compatible version is [" - + minCompat.toReleaseVersion() + + IndexVersions.MINIMUM_COMPATIBLE.toReleaseVersion() + "]. It should be re-indexed in Elasticsearch " + (Version.CURRENT.major - 1) + ".x before upgrading to " @@ -133,20 +135,20 @@ public void testIncompatibleVersion() { ) ); - indexCreated = IndexVersionUtils.randomVersionBetween(random(), minCompat, IndexVersion.current()); + indexCreated = randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); IndexMetadata goodMeta = newIndexMeta("foo", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated).build()); service.verifyIndexMetadata(goodMeta, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } public void testReadOnlyVersionCompatibility() { var service = getIndexMetadataVerifier(); - var indexCreated = IndexVersions.MINIMUM_READONLY_COMPATIBLE; { var idxMetadata = newIndexMeta( - "not-searchable-snapshot", + "legacy", Settings.builder() .put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()) - .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), randomBoolean()) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099)) .build() ); String message = expectThrows( @@ -156,12 +158,9 @@ public void testReadOnlyVersionCompatibility() { assertThat( message, equalTo( - "The index [not-searchable-snapshot/" + "The index [legacy/" + idxMetadata.getIndexUUID() - + "] has current compatibility version [" - + indexCreated.toReleaseVersion() - + "] " - + "but the minimum compatible version is [" + + "] has current compatibility version [6.8.0] but the minimum compatible version is [" + IndexVersions.MINIMUM_COMPATIBLE.toReleaseVersion() + "]. 
It should be re-indexed in Elasticsearch " + (Version.CURRENT.major - 1) @@ -171,14 +170,61 @@ public void testReadOnlyVersionCompatibility() { ) ); } + var indexCreated = randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + getPreviousVersion(IndexVersions.MINIMUM_COMPATIBLE) + ); { var idxMetadata = newIndexMeta( - "not-read-only", + "regular", Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true) .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) - .put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE) .build() ); + service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), randomBoolean()); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, false); + } + + var idxMetadata = newIndexMeta("regular-no-write-block", settings.build()); + String message = expectThrows( + IllegalStateException.class, + () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ).getMessage(); + assertThat( + message, + equalTo( + "The index [regular-no-write-block/" + + idxMetadata.getIndexUUID() + + "] created in version [" + + indexCreated.toReleaseVersion() + + "] with current compatibility version [" + + indexCreated.toReleaseVersion() + + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " + + Build.current().version() + + "." 
+ ) + ); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), false); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()); + } + + var idxMetadata = newIndexMeta("regular-not-read-only-verified", settings.build()); String message = expectThrows( IllegalStateException.class, () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) @@ -186,10 +232,10 @@ public void testReadOnlyVersionCompatibility() { assertThat( message, equalTo( - "The index [not-read-only/" + "The index [regular-not-read-only-verified/" + idxMetadata.getIndexUUID() + "] created in version [" - + indexCreated + + indexCreated.toReleaseVersion() + "] with current compatibility version [" + indexCreated.toReleaseVersion() + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " @@ -200,7 +246,7 @@ public void testReadOnlyVersionCompatibility() { } { var idxMetadata = newIndexMeta( - "good", + "searchable-snapshot", Settings.builder() .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) @@ -209,6 +255,42 @@ public void testReadOnlyVersionCompatibility() { ); service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } + { + var idxMetadata = newIndexMeta( + "archive", + Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099)) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, indexCreated) + .build() + ); + service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + } + { + var idxMetadata = newIndexMeta( + "archive-no-write-block", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099)) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, indexCreated) + .build() + ); + String message = expectThrows( + IllegalStateException.class, + () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ).getMessage(); + assertThat( + message, + equalTo( + "The index [archive-no-write-block/" + + idxMetadata.getIndexUUID() + + "] created in version [6.8.0] with current compatibility version [" + + indexCreated.toReleaseVersion() + + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " + + Build.current().version() + + "." 
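The archive cases above apply the same rule to pre-7 indices, keyed off the compatibility version rather than the creation version. Condensed from those cases:

    Settings archiveSettings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099))  // created on 6.8.0
        .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, indexCreated)       // rewritten into the read-only range
        .put(IndexMetadata.SETTING_BLOCKS_WRITE, true)                        // still requires the write block
        .build();
    // Verifies cleanly; without the write block the same metadata is rejected with IllegalStateException.
    service.verifyIndexMetadata(
        newIndexMeta("archive", archiveSettings),
        IndexVersions.MINIMUM_COMPATIBLE,
        IndexVersions.MINIMUM_READONLY_COMPATIBLE
    );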
+ ) + ); + } } private IndexMetadataVerifier getIndexMetadataVerifier() { diff --git a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java index 7a63934b4810b..eca30fdce2cf8 100644 --- a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java @@ -24,8 +24,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.metadata.HealthMetadata; import org.elasticsearch.health.node.selection.HealthNode; @@ -119,18 +117,9 @@ public void setUp() throws Exception { client = mock(Client.class); - FeatureService featureService = new FeatureService(List.of(new HealthFeatures())); - mockHealthTracker = new MockHealthTracker(); - localHealthMonitor = LocalHealthMonitor.create( - Settings.EMPTY, - clusterService, - threadPool, - client, - featureService, - List.of(mockHealthTracker) - ); + localHealthMonitor = LocalHealthMonitor.create(Settings.EMPTY, clusterService, threadPool, client, List.of(mockHealthTracker)); } @After diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java index 51dadd8154549..3069589f9556c 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksService; @@ -78,7 +77,7 @@ public void setUp() throws Exception { localNodeId = clusterService.localNode().getId(); persistentTasksService = mock(PersistentTasksService.class); settings = Settings.builder().build(); - featureService = new FeatureService(List.of(new HealthFeatures())); + featureService = new FeatureService(List.of()); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index 5863d2f932968..a1a119d416f01 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -12,7 +12,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -21,7 +21,7 @@ public class LuceneChangesSnapshotTests extends 
SearchBasedChangesSnapshotTests { @Override protected Translog.Snapshot newRandomSnapshot( - MappingLookup mappingLookup, + MapperService mapperService, Engine.Searcher engineSearcher, int searchBatchSize, long fromSeqNo, @@ -32,6 +32,7 @@ protected Translog.Snapshot newRandomSnapshot( IndexVersion indexVersionCreated ) throws IOException { return new LuceneChangesSnapshot( + mapperService, engineSearcher, searchBatchSize, fromSeqNo, diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java index a5d5d9b210e33..2beeffef7fbd8 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.translog.Translog; @@ -31,7 +31,7 @@ protected Settings indexSettings() { @Override protected Translog.Snapshot newRandomSnapshot( - MappingLookup mappingLookup, + MapperService mapperService, Engine.Searcher engineSearcher, int searchBatchSize, long fromSeqNo, @@ -42,7 +42,7 @@ protected Translog.Snapshot newRandomSnapshot( IndexVersion indexVersionCreated ) throws IOException { return new LuceneSyntheticSourceChangesSnapshot( - mappingLookup, + mapperService, engineSearcher, searchBatchSize, randomLongBetween(0, ByteSizeValue.ofBytes(Integer.MAX_VALUE).getBytes()), diff --git a/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java index 9cfa7321973a4..ed11ac7fd05c2 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.store.Store; @@ -46,7 +46,7 @@ protected Settings indexSettings() { } protected abstract Translog.Snapshot newRandomSnapshot( - MappingLookup mappingLookup, + MapperService mapperService, Engine.Searcher engineSearcher, int searchBatchSize, long fromSeqNo, @@ -117,7 +117,7 @@ public void testBasics() throws Exception { Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); try ( Translog.Snapshot snapshot = newRandomSnapshot( - engine.engineConfig.getMapperService().mappingLookup(), + engine.engineConfig.getMapperService(), searcher, between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, @@ -137,7 +137,7 @@ public void testBasics() throws Exception { searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); try ( Translog.Snapshot snapshot = newRandomSnapshot( - engine.engineConfig.getMapperService().mappingLookup(), + 
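Both snapshot implementations now receive the whole MapperService instead of a point-in-time MappingLookup. A sketch of the updated construction; the three boolean flags are assumptions inferred from the surrounding test shape, not confirmed by this diff:

    try (Translog.Snapshot snapshot = new LuceneChangesSnapshot(
            engine.engineConfig.getMapperService(), // was: getMapperService().mappingLookup()
            engineSearcher,
            SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
            fromSeqNo,
            toSeqNo,
            true,            // requiredFullRange (assumed)
            randomBoolean(), // singleConsumer (assumed)
            randomBoolean(), // accessStats (assumed)
            IndexVersion.current())) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            // consume the change operations, e.g. replay them for recovery
        }
    }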
engine.engineConfig.getMapperService(), searcher, between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, @@ -163,7 +163,7 @@ public void testBasics() throws Exception { Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); try ( Translog.Snapshot snapshot = newRandomSnapshot( - engine.engineConfig.getMapperService().mappingLookup(), + engine.engineConfig.getMapperService(), searcher, between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, @@ -182,7 +182,7 @@ public void testBasics() throws Exception { searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); try ( Translog.Snapshot snapshot = newRandomSnapshot( - engine.engineConfig.getMapperService().mappingLookup(), + engine.engineConfig.getMapperService(), searcher, between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, @@ -206,7 +206,7 @@ public void testBasics() throws Exception { searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); try ( Translog.Snapshot snapshot = newRandomSnapshot( - engine.engineConfig.getMapperService().mappingLookup(), + engine.engineConfig.getMapperService(), searcher, between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, @@ -270,7 +270,7 @@ public void testSkipNonRootOfNestedDocuments() throws Exception { final boolean accessStats = randomBoolean(); try ( Translog.Snapshot snapshot = newRandomSnapshot( - engine.engineConfig.getMapperService().mappingLookup(), + engine.engineConfig.getMapperService(), searcher, between(1, 100), 0, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java index 4b0fac2cf2e0f..c17c0d10410fa 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java @@ -59,7 +59,9 @@ public void testGetMappings() { } public void testGetIndex() { - GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex().setFeatures(GetIndexRequest.Feature.MAPPINGS).get(); + GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .setFeatures(GetIndexRequest.Feature.MAPPINGS) + .get(); assertExpectedMappings(getIndexResponse.mappings()); } diff --git a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java index 15febaa8db2ab..8a84b0ec59b51 100644 --- a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java +++ b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.http.HttpInfo; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; -import org.elasticsearch.reservedstate.service.FileSettingsFeatures; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; @@ -48,7 +47,6 @@ import java.nio.channels.ServerSocketChannel; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import static org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata.ErrorKind.TRANSIENT; @@ -60,7 +58,6 @@ public class ReadinessServiceTests extends ESTestCase implements ReadinessClient private ThreadPool threadpool; private Environment env; private 
FakeHttpTransport httpTransport; - private static final Set nodeFeatures = Set.of(FileSettingsFeatures.FILE_SETTINGS_SUPPORTED.id()); private static Metadata emptyReservedStateMetadata; static { @@ -310,26 +307,6 @@ public void testFileSettingsUpdateError() throws Exception { readinessService.close(); } - public void testFileSettingsMixedCluster() throws Exception { - readinessService.start(); - - // initially the service isn't ready because initial cluster state has not been applied yet - assertFalse(readinessService.ready()); - - ClusterState noFileSettingsState = ClusterState.builder(noFileSettingsState()) - // the master node is upgraded to support file settings, but existing node2 is not - .nodeFeatures(Map.of(httpTransport.node.getId(), nodeFeatures)) - .build(); - ClusterChangedEvent event = new ClusterChangedEvent("test", noFileSettingsState, emptyState()); - readinessService.clusterChanged(event); - - // when upgrading from nodes before file settings exist, readiness should return true once a master is elected - assertTrue(readinessService.ready()); - - readinessService.stop(); - readinessService.close(); - } - private ClusterState emptyState() { return ClusterState.builder(new ClusterName("cluster")) .nodes( @@ -347,7 +324,6 @@ private ClusterState noFileSettingsState() { .masterNodeId(httpTransport.node.getId()) .localNodeId(httpTransport.node.getId()) ) - .nodeFeatures(Map.of(httpTransport.node.getId(), nodeFeatures, "node2", nodeFeatures)) .build(); } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java index 16e066e486de9..fe602d2854c8c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java @@ -1215,7 +1215,7 @@ public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws Executi // ok } - expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().setIndices("index").get()); + expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("index").get()); assertEquals(0, service.getActiveContexts()); diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java index da28b0eff441f..2724b86f9acd4 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -88,10 +89,7 @@ protected KnnRetrieverBuilder createTestInstance() { protected KnnRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { return (KnnRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( parser, - new RetrieverParserContext( - new SearchUsage(), - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED - ) + new RetrieverParserContext(new SearchUsage(), Predicates.never()) ); } diff --git 
a/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java deleted file mode 100644 index 6448d11de2e47..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.search.retriever; - -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; - -import java.io.IOException; -import java.util.List; - -/** Tests retrievers validate on their own {@link NodeFeature} */ -public class RetrieverBuilderVersionTests extends ESTestCase { - - public void testRetrieverVersions() throws IOException { - try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) { - SearchSourceBuilder ssb = new SearchSourceBuilder(); - ParsingException iae = expectThrows(ParsingException.class, () -> ssb.parseXContent(parser, true, nf -> false)); - assertEquals("Unknown key for a START_OBJECT in [retriever].", iae.getMessage()); - } - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) { - SearchSourceBuilder ssb = new SearchSourceBuilder(); - ParsingException iae = expectThrows( - ParsingException.class, - () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED) - ); - assertEquals("unknown retriever [standard]", iae.getMessage()); - } - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) { - SearchSourceBuilder ssb = new SearchSourceBuilder(); - ssb.parseXContent( - parser, - true, - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED - ); - } - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"knn\":{}}}")) { - SearchSourceBuilder ssb = new SearchSourceBuilder(); - ParsingException iae = expectThrows( - ParsingException.class, - () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED) - ); - assertEquals("unknown retriever [knn]", iae.getMessage()); - } - - try ( - XContentParser parser = createParser( - JsonXContent.jsonXContent, - "{\"retriever\":{\"knn\":{\"field\": \"test\", \"k\": 2, \"num_candidates\": 5, \"query_vector\": [1, 2, 3]}}}" - ) - ) { - SearchSourceBuilder ssb = new SearchSourceBuilder(); - ssb.parseXContent( - parser, - true, - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED - ); - } - } - - @Override - protected 
NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents()); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java index eacd949077bc4..979c588089f5c 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -100,10 +101,7 @@ protected StandardRetrieverBuilder createTestInstance() { protected StandardRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { return (StandardRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( parser, - new RetrieverParserContext( - new SearchUsage(), - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED - ) + new RetrieverParserContext(new SearchUsage(), Predicates.never()) ); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java index cc21ade314715..cadd9d5196f69 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; import org.elasticsearch.health.Diagnosis.Resource.Type; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorResult; import org.elasticsearch.health.SimpleHealthIndicatorDetails; @@ -37,7 +36,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.stream.Stream; import static org.elasticsearch.cluster.node.DiscoveryNode.DISCOVERY_NODE_COMPARATOR; @@ -382,10 +380,7 @@ public void testMappedFieldsForTelemetry() { } private ClusterState createClusterStateWith(RepositoriesMetadata metadata) { - var features = Set.of(HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR.id()); - var builder = ClusterState.builder(new ClusterName("test-cluster")) - .nodes(DiscoveryNodes.builder().add(node1).add(node2).build()) - .nodeFeatures(Map.of(node1.getId(), features, node2.getId(), features)); + var builder = ClusterState.builder(new ClusterName("test-cluster")).nodes(DiscoveryNodes.builder().add(node1).add(node2).build()); if (metadata != null) { builder.metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, metadata)); } @@ -399,7 +394,7 @@ private static RepositoryMetadata createRepositoryMetadata(String name, boolean private RepositoryIntegrityHealthIndicatorService createRepositoryIntegrityHealthIndicatorService(ClusterState clusterState) { var clusterService = 
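With the retriever NodeFeature gates gone (RetrieverBuilderVersionTests above is deleted outright), the parser no longer consults feature predicates, so the parsing tests can hand it a constant. Condensed from the two retriever test hunks:

    RetrieverParserContext context = new RetrieverParserContext(new SearchUsage(), Predicates.never());
    RetrieverBuilder retriever = RetrieverBuilder.parseTopLevelRetrieverBuilder(parser, context);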
mock(ClusterService.class); when(clusterService.state()).thenReturn(clusterState); - return new RepositoryIntegrityHealthIndicatorService(clusterService, featureService); + return new RepositoryIntegrityHealthIndicatorService(clusterService); } private SimpleHealthIndicatorDetails createDetails(int total, int corruptedCount, List corrupted, int unknown, int invalid) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 7a07e407024ce..0fe886f37aa47 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -241,9 +241,7 @@ import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class SnapshotResiliencyTests extends ESTestCase { @@ -2111,8 +2109,6 @@ protected void connectToNodesAndWait(ClusterState newClusterState) { } ); recoverySettings = new RecoverySettings(settings, clusterSettings); - FeatureService mockFeatureService = mock(FeatureService.class); - when(mockFeatureService.clusterHasFeature(any(), any())).thenReturn(true); mockTransport = new DisruptableMockTransport(node, deterministicTaskQueue) { @Override protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) { @@ -2403,7 +2399,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { null, FailureStoreMetrics.NOOP ), - mockFeatureService, client, actionFilters, indexNameExpressionResolver, diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index c0185832d6122..9b56cd3bde53c 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.transport.InboundDecoder.ChannelType; @@ -125,12 +126,12 @@ public void testDecode() throws IOException { } + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // can delete test in v9 public void testDecodePreHeaderSizeVariableInt() throws IOException { - // TODO: Can delete test on 9.0 Compression.Scheme compressionScheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.DEFLATE, null); String action = "test-request"; long requestId = randomNonNegativeLong(); - final TransportVersion preHeaderVariableInt = TransportHandshaker.EARLIEST_HANDSHAKE_VERSION; + final TransportVersion preHeaderVariableInt = TransportHandshaker.V7_HANDSHAKE_VERSION; final String contentValue = randomAlphaOfLength(100); // 8.0 is only compatible with handshakes on a pre-variable int version final OutboundMessage message = new OutboundMessage.Request( @@ -189,7 +190,7 @@ public void testDecodeHandshakeV7Compatibility() throws IOException { final String headerKey = randomAlphaOfLength(10); final String headerValue = randomAlphaOfLength(20); 
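The decoder assertions later in this file recompute the expected header size per wire version, because pre-7.6.0 frames carry no variable-length header section. The arithmetic, condensed into a helper (a sketch using the constants these tests already reference):

    static int expectedHeaderSize(TransportVersion version, BytesReference bytes) {
        int fixedSize = TcpHeader.headerSize(version);
        // only 7.6.0+ frames include the variable-length header block
        return version.onOrAfter(TransportVersions.V_7_6_0)
            ? fixedSize + bytes.getInt(TcpHeader.VARIABLE_HEADER_SIZE_POSITION)
            : fixedSize;
    }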
threadContext.putHeader(headerKey, headerValue); - TransportVersion handshakeCompat = TransportHandshaker.EARLIEST_HANDSHAKE_VERSION; + TransportVersion handshakeCompat = TransportHandshaker.V7_HANDSHAKE_VERSION; OutboundMessage message = new OutboundMessage.Request( threadContext, new TestRequest(randomAlphaOfLength(100)), @@ -225,8 +226,8 @@ public void testDecodeHandshakeV7Compatibility() throws IOException { } public void testDecodeHandshakeV8Compatibility() throws IOException { - doHandshakeCompatibilityTest(TransportHandshaker.REQUEST_HANDSHAKE_VERSION, null); - doHandshakeCompatibilityTest(TransportHandshaker.REQUEST_HANDSHAKE_VERSION, Compression.Scheme.DEFLATE); + doHandshakeCompatibilityTest(TransportHandshaker.V8_HANDSHAKE_VERSION, null); + doHandshakeCompatibilityTest(TransportHandshaker.V8_HANDSHAKE_VERSION, Compression.Scheme.DEFLATE); } public void testDecodeHandshakeV9Compatibility() throws IOException { @@ -286,13 +287,18 @@ public void testClientChannelTypeFailsDecodingRequests() throws Exception { } } // a request + final var isHandshake = randomBoolean(); + final var version = isHandshake + ? randomFrom(TransportHandshaker.ALLOWED_HANDSHAKE_VERSIONS) + : TransportVersionUtils.randomCompatibleVersion(random()); + logger.info("--> version = {}", version); OutboundMessage message = new OutboundMessage.Request( threadContext, new TestRequest(randomAlphaOfLength(100)), - TransportHandshaker.REQUEST_HANDSHAKE_VERSION, + version, action, requestId, - randomBoolean(), + isHandshake, randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4, null) ); @@ -309,9 +315,9 @@ public void testClientChannelTypeFailsDecodingRequests() throws Exception { try (InboundDecoder decoder = new InboundDecoder(recycler, randomFrom(ChannelType.SERVER, ChannelType.MIX))) { final ArrayList fragments = new ArrayList<>(); int bytesConsumed = decoder.decode(wrapAsReleasable(bytes), fragments::add); - int totalHeaderSize = TcpHeader.headerSize(TransportVersion.current()) + bytes.getInt( - TcpHeader.VARIABLE_HEADER_SIZE_POSITION - ); + int totalHeaderSize = TcpHeader.headerSize(version) + (version.onOrAfter(TransportVersions.V_7_6_0) + ? bytes.getInt(TcpHeader.VARIABLE_HEADER_SIZE_POSITION) + : 0); assertEquals(totalHeaderSize, bytesConsumed); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -331,12 +337,16 @@ public void testServerChannelTypeFailsDecodingResponses() throws Exception { } } // a response + final var isHandshake = randomBoolean(); + final var version = isHandshake + ? randomFrom(TransportHandshaker.ALLOWED_HANDSHAKE_VERSIONS) + : TransportVersionUtils.randomCompatibleVersion(random()); OutboundMessage message = new OutboundMessage.Response( threadContext, new TestResponse(randomAlphaOfLength(100)), - TransportHandshaker.REQUEST_HANDSHAKE_VERSION, + version, requestId, - randomBoolean(), + isHandshake, randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4, null) ); @@ -351,9 +361,9 @@ public void testServerChannelTypeFailsDecodingResponses() throws Exception { try (InboundDecoder decoder = new InboundDecoder(recycler, randomFrom(ChannelType.CLIENT, ChannelType.MIX))) { final ArrayList fragments = new ArrayList<>(); int bytesConsumed = decoder.decode(wrapAsReleasable(bytes), fragments::add); - int totalHeaderSize = TcpHeader.headerSize(TransportVersion.current()) + bytes.getInt( - TcpHeader.VARIABLE_HEADER_SIZE_POSITION - ); + int totalHeaderSize = TcpHeader.headerSize(version) + (version.onOrAfter(TransportVersions.V_7_6_0) + ? 
bytes.getInt(TcpHeader.VARIABLE_HEADER_SIZE_POSITION) + : 0); assertEquals(totalHeaderSize, bytesConsumed); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -449,7 +459,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { final String headerKey = randomAlphaOfLength(10); final String headerValue = randomAlphaOfLength(20); threadContext.putHeader(headerKey, headerValue); - TransportVersion handshakeCompat = TransportHandshaker.EARLIEST_HANDSHAKE_VERSION; + TransportVersion handshakeCompat = TransportHandshaker.V7_HANDSHAKE_VERSION; OutboundMessage message = new OutboundMessage.Request( threadContext, new TestRequest(randomAlphaOfLength(100)), diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 8ff7c511fa485..47e0dc1f61cac 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -34,6 +35,7 @@ import org.elasticsearch.core.Streams; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; import org.elasticsearch.test.TransportVersionUtils; @@ -133,11 +135,14 @@ public void testSendRawBytes() { public void testSendRequest() throws IOException { ThreadContext threadContext = threadPool.getThreadContext(); - TransportVersion version = TransportHandshaker.REQUEST_HANDSHAKE_VERSION; String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress = randomBoolean(); + TransportVersion version = isHandshake + ? randomFrom(TransportHandshaker.ALLOWED_HANDSHAKE_VERSIONS) + : TransportVersionUtils.randomCompatibleVersion(random()); + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // drop the version.onOrAfter() in v9 + boolean compress = version.onOrAfter(TransportVersions.MINIMUM_COMPATIBLE) && randomBoolean(); String value = "message"; threadContext.putHeader("header", "header_value"); TestRequest request = new TestRequest(value); @@ -204,11 +209,14 @@ public void onRequestSent( public void testSendResponse() throws IOException { ThreadContext threadContext = threadPool.getThreadContext(); - TransportVersion version = TransportHandshaker.REQUEST_HANDSHAKE_VERSION; String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress = randomBoolean(); + TransportVersion version = isHandshake + ? 
randomFrom(TransportHandshaker.ALLOWED_HANDSHAKE_VERSIONS) + : TransportVersionUtils.randomCompatibleVersion(random()); + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // drop the version.onOrAfter() in v9 + boolean compress = version.onOrAfter(TransportVersions.MINIMUM_COMPATIBLE) && randomBoolean(); String value = "message"; threadContext.putHeader("header", "header_value"); @@ -269,8 +277,8 @@ public void onResponseSent(long requestId, String action, TransportResponse resp public void testErrorResponse() throws IOException { ThreadContext threadContext = threadPool.getThreadContext(); - TransportVersion version = TransportHandshaker.REQUEST_HANDSHAKE_VERSION; - String action = "handshake"; + TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); + String action = "not-a-handshake"; long requestId = randomLongBetween(0, 300); threadContext.putHeader("header", "header_value"); ElasticsearchException error = new ElasticsearchException("boom"); @@ -322,7 +330,7 @@ public void onResponseSent(long requestId, String action, Exception error) { } public void testSendErrorAfterFailToSendResponse() throws Exception { - TransportVersion version = TransportHandshaker.REQUEST_HANDSHAKE_VERSION; + TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); String action = randomAlphaOfLength(10); long requestId = randomLongBetween(0, 300); var response = new ReleasbleTestResponse(randomAlphaOfLength(10)) { diff --git a/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java index 26b76c798b2f2..2eceaa8e421e4 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java @@ -37,6 +37,8 @@ public class TransportHandshakerTests extends ESTestCase { private TestThreadPool threadPool; private TransportHandshaker.HandshakeRequestSender requestSender; + private static final TransportVersion HANDSHAKE_REQUEST_VERSION = TransportHandshaker.V8_HANDSHAKE_VERSION; + @Override public void setUp() throws Exception { super.setUp(); @@ -64,7 +66,7 @@ public void testHandshakeRequestAndResponse() throws IOException { long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), versionFuture); - verify(requestSender).sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); + verify(requestSender).sendRequest(node, channel, reqId, HANDSHAKE_REQUEST_VERSION); assertFalse(versionFuture.isDone()); @@ -87,7 +89,7 @@ public void testHandshakeRequestFutureVersionsCompatibility() throws IOException long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), new PlainActionFuture<>()); - verify(requestSender).sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); + verify(requestSender).sendRequest(node, channel, reqId, HANDSHAKE_REQUEST_VERSION); TransportHandshaker.HandshakeRequest handshakeRequest = new TransportHandshaker.HandshakeRequest(TransportVersion.current()); BytesStreamOutput currentHandshakeBytes = new BytesStreamOutput(); @@ -123,7 +125,7 @@ public void testHandshakeError() throws IOException { long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), versionFuture); - 
verify(requestSender).sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); + verify(requestSender).sendRequest(node, channel, reqId, HANDSHAKE_REQUEST_VERSION); assertFalse(versionFuture.isDone()); @@ -138,8 +140,7 @@ public void testHandshakeError() throws IOException { public void testSendRequestThrowsException() throws IOException { PlainActionFuture versionFuture = new PlainActionFuture<>(); long reqId = randomLongBetween(1, 10); - doThrow(new IOException("boom")).when(requestSender) - .sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); + doThrow(new IOException("boom")).when(requestSender).sendRequest(node, channel, reqId, HANDSHAKE_REQUEST_VERSION); handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), versionFuture); @@ -154,7 +155,7 @@ public void testHandshakeTimeout() throws IOException { long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(100, TimeUnit.MILLISECONDS), versionFuture); - verify(requestSender).sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); + verify(requestSender).sendRequest(node, channel, reqId, HANDSHAKE_REQUEST_VERSION); ConnectTransportException cte = expectThrows(ConnectTransportException.class, versionFuture::actionGet); assertThat(cte.getMessage(), containsString("handshake_timeout")); diff --git a/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java b/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java index 74a39c395a058..5aeef2abfed12 100644 --- a/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java +++ b/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java @@ -136,6 +136,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_10_0; + return TransportVersions.ZERO; } } diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java index 56e702d2f76a0..ae3eb224d6886 100644 --- a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java @@ -10,6 +10,7 @@ package org.elasticsearch.test.fixtures.minio; import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer; +import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.images.builder.ImageFromDockerfile; public final class MinioTestContainer extends DockerEnvironmentAwareTestContainer { @@ -31,6 +32,10 @@ public MinioTestContainer(boolean enabled, String accessKey, String secretKey, S ); if (enabled) { addExposedPort(servicePort); + // The following waits for a specific log message as the readiness signal. When the minio docker image + // gets upgraded in the future, we must ensure the log message still exists, or update it here accordingly. + // Otherwise, tests using the minio fixture will fail with a timeout while waiting for the container to become ready.
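An aside on that wait strategy: Wait.forLogMessage takes a regex and the number of matches to wait for, and can be combined with an explicit startup timeout when a container is slow to boot. An illustrative variant (the two-minute timeout and the java.time.Duration import are assumptions, not part of this change):

    setWaitStrategy(Wait.forLogMessage("API: .*:9000.*", 1).withStartupTimeout(Duration.ofMinutes(2)));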
+ setWaitStrategy(Wait.forLogMessage("API: .*:9000.*", 1)); } this.enabled = enabled; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java index fe1b08d5e738d..37352410811a8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -29,18 +30,28 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Collections; +import java.util.List; import static org.elasticsearch.test.ESTestCase.createTestAnalysis; public class MapperTestUtils { + public static MapperService newMapperService( + NamedXContentRegistry xContentRegistry, + Path tempDir, + Settings indexSettings, + String indexName + ) throws IOException { + return newMapperService(List.of(), xContentRegistry, tempDir, indexSettings, indexName); + } public static MapperService newMapperService( + List extraMappers, NamedXContentRegistry xContentRegistry, Path tempDir, Settings indexSettings, String indexName ) throws IOException { - IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + IndicesModule indicesModule = new IndicesModule(extraMappers); return newMapperService(xContentRegistry, tempDir, indexSettings, indicesModule, indexName); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 9a160fffb965c..7a2f375001874 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -99,6 +99,7 @@ import org.elasticsearch.index.translog.TranslogDeletionPolicy; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; @@ -220,6 +221,10 @@ protected String defaultMapping() { """; } + protected List extraMappers() { + return List.of(); + } + @Override @Before public void setUp() throws Exception { @@ -241,7 +246,7 @@ public void setUp() throws Exception { Lucene.cleanLuceneIndex(store.directory()); Lucene.cleanLuceneIndex(storeReplica.directory()); primaryTranslogDir = createTempDir("translog-primary"); - mapperService = createMapperService(defaultSettings.getSettings(), defaultMapping()); + mapperService = createMapperService(defaultSettings.getSettings(), defaultMapping(), extraMappers()); translogHandler = createTranslogHandler(mapperService); engine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy()); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); @@ -1440,15 +1445,21 @@ public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings } public static MapperService createMapperService() throws 
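The new extraMappers() hook lets engine tests register additional mapper plugins without touching the shared setup. A sketch of a subclass using it (MyTestMapperPlugin is a hypothetical MapperPlugin implementation, not part of this change):

    @Override
    protected List<MapperPlugin> extraMappers() {
        // hypothetical plugin contributing a custom field mapper to the test MapperService
        return List.of(new MyTestMapperPlugin());
    }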
IOException { - return createMapperService(Settings.EMPTY, "{}"); + return createMapperService(Settings.EMPTY, "{}", List.of()); } public static MapperService createMapperService(Settings settings, String mappings) throws IOException { + return createMapperService(settings, mappings, List.of()); + } + + public static MapperService createMapperService(Settings settings, String mappings, List extraMappers) + throws IOException { IndexMetadata indexMetadata = IndexMetadata.builder("index") .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(settings)) .putMapping(mappings) .build(); MapperService mapperService = MapperTestUtils.newMapperService( + extraMappers, new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), createTempDir(), indexMetadata.getSettings(), diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index b33abc2d781e8..bb48b0031483c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -1213,7 +1213,7 @@ private void assertSyntheticSourceWithTranslogSnapshot(SyntheticSourceSupport su try (var indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) { int start = randomBoolean() ? 0 : randomIntBetween(1, maxDocs - 10); var snapshot = new LuceneSyntheticSourceChangesSnapshot( - mapperService.mappingLookup(), + mapperService, new Engine.Searcher( "recovery", indexReader, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index c70664940bac6..5793e8613bbd5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1613,7 +1613,7 @@ protected static boolean indexExists(String index) { public static boolean indexExists(String index, Client client) { GetIndexResponse getIndexResponse = client.admin() .indices() - .prepareGetIndex() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) .setIndices(index) .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED) .get(); @@ -2564,14 +2564,14 @@ private static boolean isSuiteScopedTest(Class clazz) { } public static Index resolveIndex(String index) { - GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex().setIndices(index).get(); + GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); String uuid = getIndexResponse.getSettings().get(index).get(IndexMetadata.SETTING_INDEX_UUID); return new Index(index, uuid); } public static String resolveCustomDataPath(String index) { - GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex().setIndices(index).get(); + GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); return getIndexResponse.getSettings().get(index).get(IndexMetadata.SETTING_DATA_PATH); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 
63334bd70306f..9bee617d90db9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -171,7 +171,7 @@ public void tearDown() throws Exception { metadata.transientSettings().size(), equalTo(0) ); - GetIndexResponse indices = indicesAdmin().prepareGetIndex() + GetIndexResponse indices = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) .addIndices("*") .get(); @@ -401,7 +401,7 @@ protected IndexService createIndex(String index, CreateIndexRequestBuilder creat } public Index resolveIndex(String index) { - GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex().setIndices(index).get(); + GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); String uuid = getIndexResponse.getSettings().get(index).get(IndexMetadata.SETTING_INDEX_UUID); return new Index(index, uuid); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index f678f4af22328..a271c999a2ba7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1237,7 +1237,7 @@ public static Long randomLongOrNull() { return randomBoolean() ? null : randomLong(); } - public static Long randomPositiveLongOrNull() { + public static Long randomNonNegativeLongOrNull() { return randomBoolean() ? null : randomNonNegativeLong(); } @@ -1245,7 +1245,7 @@ public static Integer randomIntOrNull() { return randomBoolean() ? null : randomInt(); } - public static Integer randomPositiveIntOrNull() { + public static Integer randomNonNegativeIntOrNull() { return randomBoolean() ? 
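The recurring prepareGetIndex change in these framework classes is the same everywhere: the builder now requires an explicit timeout argument. Condensed usage as it appears in the call sites above:

    GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) // timeout is now mandatory
        .setIndices(index)
        .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED)
        .get();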
null : randomNonNegativeInt(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index 9054dc6f94182..e2a24eac6f8ab 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -91,6 +91,12 @@ public boolean clusterHasFeature(String featureId, boolean any) { } if (hasFeatureMetadata()) { + if (isRestApiCompatibilityTest()) { + // assume this is a feature that has been assumed, then removed in this version + // the feature is therefore logically present, but not specified by the cluster + return true; + } + throw new IllegalArgumentException( Strings.format( "Unknown feature %s: check the respective FeatureSpecification is provided both in module-info.java " @@ -102,6 +108,10 @@ public boolean clusterHasFeature(String featureId, boolean any) { return false; } + private static boolean isRestApiCompatibilityTest() { + return Boolean.parseBoolean(System.getProperty("tests.restCompat", "false")); + } + public static boolean hasFeatureMetadata() { return MetadataHolder.FEATURE_NAMES.isEmpty() == false; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 3461073b19ebe..c5200c816fb6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -138,11 +138,6 @@ Transport getDelegate() { return delegate; } - @Override - public TransportVersion getVersion() { - return delegate.getVersion(); - } - @Override public void setMessageListener(TransportMessageListener listener) { delegate.setMessageListener(listener); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index 6070ec140d254..8aa6ea4bc9e26 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -19,6 +19,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionResolver; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.local.model.User; +import org.elasticsearch.test.cluster.util.ArchivePatcher; import org.elasticsearch.test.cluster.util.IOUtils; import org.elasticsearch.test.cluster.util.OS; import org.elasticsearch.test.cluster.util.Pair; @@ -651,27 +652,56 @@ private void installPlugins() { .toList(); List toInstall = spec.getPlugins() + .entrySet() .stream() .map( - pluginName -> pluginPaths.stream() + plugin -> pluginPaths.stream() .map(path -> Pair.of(pattern.matcher(path.getFileName().toString()), path)) - .filter(pair -> pair.left.matches() && pair.left.group(1).equals(pluginName)) + .filter(pair -> pair.left.matches() && pair.left.group(1).equals(plugin.getKey())) .map(p -> p.right.getParent().resolve(p.left.group(0))) .findFirst() + .map(path -> { + DefaultPluginInstallSpec installSpec = plugin.getValue(); + // Patch the plugin archive with configured overrides if necessary + if
(installSpec.entitlementsOverride != null || installSpec.propertiesOverride != null) { + Path target; + try { + target = Files.createTempFile("patched-", path.getFileName().toString()); + } catch (IOException e) { + throw new UncheckedIOException("Failed to create temporary file", e); + } + ArchivePatcher patcher = new ArchivePatcher(path, target); + if (installSpec.entitlementsOverride != null) { + patcher.override( + "entitlement-policy.yaml", + original -> installSpec.entitlementsOverride.apply(original).asStream() + ); + } + if (installSpec.propertiesOverride != null) { + patcher.override( + "plugin-descriptor.properties", + original -> installSpec.propertiesOverride.apply(original).asStream() + ); + } + return patcher.patch(); + } else { + return path; + } + }) .orElseThrow(() -> { String taskPath = System.getProperty("tests.task"); String project = taskPath.substring(0, taskPath.lastIndexOf(':')); - throw new RuntimeException( + return new RuntimeException( "Unable to locate plugin '" - + pluginName + + plugin.getKey() + "'. Ensure you've added the following to the build script for project '" + project + "':\n\n" + "dependencies {\n" + " clusterPlugins " + "project(':plugins:" - + pluginName + + plugin.getKey() + "')" + "\n}" ); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index c3c4f3fe825ed..1ef4bcbfb6120 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -34,7 +35,7 @@ public abstract class AbstractLocalSpecBuilder> im private final List environmentProviders = new ArrayList<>(); private final Map environment = new HashMap<>(); private final Set modules = new HashSet<>(); - private final Set plugins = new HashSet<>(); + private final Map<String, DefaultPluginInstallSpec> plugins = new HashMap<>(); private final Set features = EnumSet.noneOf(FeatureFlag.class); private final List keystoreProviders = new ArrayList<>(); private final Map keystoreSettings = new HashMap<>(); @@ -132,11 +133,19 @@ Set getModules() { @Override public T plugin(String pluginName) { - this.plugins.add(pluginName); + this.plugins.put(pluginName, new DefaultPluginInstallSpec()); return cast(this); } - Set getPlugins() { + @Override + public T plugin(String pluginName, Consumer<? super PluginInstallSpec> config) { + DefaultPluginInstallSpec spec = new DefaultPluginInstallSpec(); + config.accept(spec); + this.plugins.put(pluginName, spec); + return cast(this); + } + + Map<String, DefaultPluginInstallSpec> getPlugins() { return inherit(() -> parent.getPlugins(), plugins); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java new file mode 100644 index 0000000000000..364bac1586d69 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
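To make the new per-plugin install configuration concrete: a minimal usage sketch, assuming the standard ElasticsearchCluster.local() builder and a Resource.fromString factory (assumed here) in org.elasticsearch.test.cluster.util.resource; the plugin name and override contents are hypothetical.

ElasticsearchCluster cluster = ElasticsearchCluster.local()
    .plugin("hypothetical-plugin", spec -> spec
        // replace the bundled entitlement policy outright (hypothetical policy content)
        .withEntitlementsOverride(original -> Resource.fromString("ALL-UNNAMED:\n  - load_native_libraries\n"))
        // rewrite one key of the bundled descriptor while keeping the rest of the original text
        .withPropertiesOverride(original -> Resource.fromString(original.replace("java.version=17", "java.version=21"))))
    .build();

With either override set, the install step above routes the plugin archive through ArchivePatcher (added later in this change) instead of installing it verbatim.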
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test.cluster.local; + +import org.elasticsearch.test.cluster.util.resource.Resource; + +import java.util.function.Function; + +public class DefaultPluginInstallSpec implements PluginInstallSpec { + Function propertiesOverride; + Function entitlementsOverride; + + @Override + public PluginInstallSpec withPropertiesOverride(Function override) { + this.propertiesOverride = override; + return this; + } + + @Override + public PluginInstallSpec withEntitlementsOverride(Function override) { + this.entitlementsOverride = override; + return this; + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index 02fdb45dffa37..b9e9520e77ebb 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -92,7 +92,7 @@ public static class LocalNodeSpec { private final List environmentProviders; private final Map environment; private final Set modules; - private final Set plugins; + private final Map plugins; private final DistributionType distributionType; private final Set features; private final List keystoreProviders; @@ -114,7 +114,7 @@ public LocalNodeSpec( List environmentProviders, Map environment, Set modules, - Set plugins, + Map plugins, DistributionType distributionType, Set features, List keystoreProviders, @@ -179,7 +179,7 @@ public Set getModules() { return modules; } - public Set getPlugins() { + public Map getPlugins() { return plugins; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index 1c9ac8a0af6cc..2b44126fef4ee 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -18,6 +18,7 @@ import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -73,6 +74,11 @@ interface LocalSpecBuilder> { */ T plugin(String pluginName); + /** + * Ensure plugin is installed into the distribution. + */ + T plugin(String pluginName, Consumer config); + /** * Require feature to be enabled in the cluster. */ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java new file mode 100644 index 0000000000000..6b0b13ddd2dd1 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test.cluster.local; + +import org.elasticsearch.test.cluster.util.resource.Resource; + +import java.util.function.Function; + +public interface PluginInstallSpec { + + /** + * Override bundled plugin properties file with the given {@link Resource}. The provided override function receives the original + * file content as function argument. + * + * @param override function returning resource used to override bundled properties file + */ + PluginInstallSpec withPropertiesOverride(Function override); + + /** + * Override bundled entitlements policy file with the given {@link Resource}. The provided override function receives the original + * file content as function argument. + * + * @param override function returning resource used to override bundled entitlements policy file + */ + PluginInstallSpec withEntitlementsOverride(Function override); +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java new file mode 100644 index 0000000000000..269d1dd9f516c --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.test.cluster.util; + +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import java.util.zip.ZipOutputStream; + +public class ArchivePatcher { + private final Path original; + private final Path target; + private final Map<String, Function<String, InputStream>> overrides = new HashMap<>(); + + public ArchivePatcher(Path original, Path target) { + this.original = original; + this.target = target; + } + + public void override(String filename, Function<String, InputStream> override) { + this.overrides.put(filename, override); + } + + public Path patch() { + try ( + ZipFile input = new ZipFile(original.toFile()); + ZipOutputStream output = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(target.toFile()))) + ) { + Enumeration<? extends ZipEntry> entries = input.entries(); + while (entries.hasMoreElements()) { + ZipEntry entry = entries.nextElement(); + output.putNextEntry(entry); + if (overrides.containsKey(entry.getName())) { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(input.getInputStream(entry)))) { + String content = reader.lines().collect(Collectors.joining(System.lineSeparator())); + overrides.get(entry.getName()).apply(content).transferTo(output); + } + } else { + input.getInputStream(entry).transferTo(output); + } + output.closeEntry(); + } + output.flush(); + output.finish(); + } catch (IOException e) { + throw new UncheckedIOException("Failed to patch archive", e); + } + + return target; + } +} diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 357c0fe74b5ce..031f8f908847c 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction; import org.elasticsearch.test.rest.Stash; import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; @@ -300,41 +299,24 @@ public Optional clusterHasCapabilities( params.put("capabilities", capabilitiesString); } params.put("error_trace", "false"); // disable error trace - - if (clusterHasFeature(RestNodesCapabilitiesAction.LOCAL_ONLY_CAPABILITIES.id(), false) == false) { - // can only check the whole cluster - if (any) { - logger.warn( - "Cluster does not support checking individual nodes for capabilities," - + "check for [{} {}?{} {}] may be incorrect in mixed-version clusters", - method, - path, - parametersString, - capabilitiesString - ); - } - return checkCapability(NodeSelector.ANY, params); - } else { - // check each node individually - we can actually check any here - params.put("local_only", "true"); // we're calling each node individually - - // individually call each node, so we
can control whether we do an 'any' or 'all' check - List nodes = clientYamlTestClient.getRestClient(NodeSelector.ANY).getNodes(); - - for (Node n : nodes) { - Optional nodeResult = checkCapability(new SpecificNodeSelector(n), params); - if (nodeResult.isEmpty()) { - return Optional.empty(); - } else if (any == nodeResult.get()) { - // either any == true and node has cap, - // or any == false (ie all) and this node does not have cap - return nodeResult; - } + params.put("local_only", "true"); // we're calling each node individually + + // individually call each node, so we can control whether we do an 'any' or 'all' check + List nodes = clientYamlTestClient.getRestClient(NodeSelector.ANY).getNodes(); + + for (Node n : nodes) { + Optional nodeResult = checkCapability(new SpecificNodeSelector(n), params); + if (nodeResult.isEmpty()) { + return Optional.empty(); + } else if (any == nodeResult.get()) { + // either any == true and node has cap, + // or any == false (ie all) and this node does not have cap + return nodeResult; } - - // if we got here, either any is true and no node has it, or any == false and all nodes have it - return Optional.of(any == false); } + + // if we got here, either any is true and no node has it, or any == false and all nodes have it + return Optional.of(any == false); } private Optional checkCapability(NodeSelector nodeSelector, Map params) { diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java index 76ee8272fe345..ae93faa69f2c4 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java @@ -134,6 +134,6 @@ protected boolean overrideBucketsPath() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java index e61f01abcbedc..de32ab9587ef9 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java @@ -157,6 +157,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java index 8bda1d59c5b57..11de2032e27d7 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java @@ -220,6 +220,6 @@ public int hashCode() { @Override public TransportVersion 
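The node-by-node loop above is a short-circuiting any/all fold over per-node Optional results; restated in isolation (an illustrative restatement, not the actual test-runner API):

// any == true: answer true as soon as one node reports the capability.
// any == false (an "all" check): answer false as soon as one node lacks it.
// An empty per-node answer (capabilities API unavailable) makes the overall answer unknown.
static Optional<Boolean> aggregate(List<Optional<Boolean>> perNodeResults, boolean any) {
    for (Optional<Boolean> result : perNodeResults) {
        if (result.isEmpty()) {
            return Optional.empty();
        }
        if (result.get() == any) {
            return Optional.of(any);
        }
    }
    return Optional.of(any == false);
}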
getMinimalSupportedVersion() { - return TransportVersions.V_7_10_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java index 2357b80a402b3..7132d9ffac309 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java @@ -150,7 +150,7 @@ public void testZeroToOne() throws Exception { } private String[] indices() { - return indicesAdmin().prepareGetIndex().addIndices("index").get().indices(); + return indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("index").get().indices(); } private void assertMinimumCapacity(AutoscalingCapacity.AutoscalingResources resources) { diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index 86f974ed13359..b3721ab3ac93e 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -78,24 +78,7 @@ tasks.register("follow-cluster", RestIntegTestTask) { useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' nonInputProperties.systemProperty 'java.security.policy', "file://${policyFile}" - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - } - def followInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("follow-cluster") - it.parameters.service = serviceProvider - } - def leaderUri = leaderInfo.map { it.getAllHttpSocketURI().get(0) } - def followerUri = followInfo.map { it.getAllHttpSocketURI().get(0) } - - nonInputProperties.systemProperty 'tests.leader_host', leaderUri + nonInputProperties.systemProperty 'tests.leader_host', getClusterInfo('leader-cluster').map { it.getAllHttpSocketURI().get(0) } nonInputProperties.systemProperty 'log', followCluster.map(c -> c.getFirstNode().getServerLog()) } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 61678784e6b38..d5bc9395fc9c0 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -29,7 +29,7 @@ def leaderCluster = testClusters.register('leader-cluster') { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' user username: 'admin', password: 'admin-password', role: 'superuser' - setting 'path.repo', "${buildDir}/cluster/shared/repo/leader-cluster" + setting 'path.repo', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" } def middleCluster = testClusters.register('middle-cluster') { @@ -55,25 +55,16 @@ def middleCluster = testClusters.register('middle-cluster') { tasks.register("leader-cluster", RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.target_cluster', 'leader' - 
systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" + systemProperty 'tests.leader_cluster_repository_path', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" } tasks.register("middle-cluster", RestIntegTestTask) { dependsOn "leader-cluster" useCluster testClusters.named("leader-cluster") systemProperty 'tests.target_cluster', 'middle' - systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" + systemProperty 'tests.leader_cluster_repository_path', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', leaderUri } @@ -82,24 +73,10 @@ tasks.register('follow-cluster', RestIntegTestTask) { useCluster leaderCluster useCluster middleCluster systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" - - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } + systemProperty 'tests.leader_cluster_repository_path', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" - def middleUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("middle-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } + def middleUri = getClusterInfo('middle-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', leaderUri nonInputProperties.systemProperty 'tests.middle_host', middleUri } diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index ff342accef277..ad4d2cb5afc7c 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -53,17 +53,7 @@ tasks.register('follow-cluster', RestIntegTestTask) { useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def followInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("follow-cluster") - it.parameters.service = serviceProvider - } - def followUri = followInfo.map { it.allHttpSocketURI.get(0) } - + def followUri = getClusterInfo('follow-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', followUri } diff --git 
a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index 848beb1da10ae..89ad8cad84987 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -55,18 +55,8 @@ tasks.register('follow-cluster', RestIntegTestTask) { useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - - nonInputProperties.systemProperty 'tests.leader_host', - "${-> leaderUri.get() }" + def leaderUri = getClusterInfo("leader-cluster").map { it.allHttpSocketURI.get(0) } + nonInputProperties.systemProperty 'tests.leader_host', leaderUri } tasks.register("followClusterRestartTest", StandaloneRestIntegTestTask) { @@ -76,27 +66,13 @@ tasks.register("followClusterRestartTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.load_packaged', 'false' systemProperty 'tests.target_cluster', 'follow-restart' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - - def followUris = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("follow-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.join(",") } - + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } + def followUris = getClusterInfo('follow-cluster').map { it.allHttpSocketURI.join(",") } nonInputProperties.systemProperty 'tests.leader_host', leaderUri nonInputProperties.systemProperty 'tests.rest.cluster', followUris doFirst { - serviceProvider.get().restart(clusterPath, "follow-cluster") + getRegistry().get().restart(clusterPath, "follow-cluster") } } diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index 454a9ae721736..3ceb86a632e0a 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -58,16 +58,7 @@ def followerClusterTestTask = tasks.register('follow-cluster', RestIntegTestTask dependsOn 'leader-cluster' useCluster leadCluster systemProperty 'tests.target_cluster', 'follow' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', leaderUri } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 
e50627822e315..ba2c1bd18b435 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -331,7 +331,7 @@ public void testFollowIndexWithoutWaitForComplete() throws Exception { assertFalse(response.isIndexFollowingStarted()); // Check that the index exists, would throw index not found exception if the index is missing - followerClient().admin().indices().prepareGetIndex().addIndices("index2").get(); + followerClient().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("index2").get(); ensureFollowerGreen(true, "index2"); final Map firstBatchNumDocsPerShard = new HashMap<>(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 01a4076c58bd8..5749bf762e2e0 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -531,7 +532,8 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetad MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, EngineConfig.INDEX_CODEC_SETTING, DataTier.TIER_PREFERENCE_SETTING, - IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING + IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING, + MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING ); public static Settings filter(Settings originalSettings) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java index 4a1d26d05a980..cb2aafd2300cf 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -105,7 +106,12 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME) ); final CompletableFuture indexUUIDCompletableFuture = indexNameCompletableFuture.thenCompose( - concreteIndexName -> asyncGetIndexUUID(client, concreteIndexName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) + concreteIndexName -> asyncGetIndexUUID( + client, + concreteIndexName, + client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), + RestUtils.getMasterNodeTimeout(restRequest) + ) ); final CompletableFuture shardStatsCompletableFuture = 
indexNameCompletableFuture.thenCompose( concreteIndexName -> asyncShardStats(client, concreteIndexName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) @@ -336,18 +342,20 @@ private static CompletableFuture asyncShardStats( * @param client The NodeClient for executing the asynchronous request. * @param concreteIndexName The name of the index for which to retrieve the index UUID. * @param executorService The executorService service for executing the asynchronous task. + * @param masterTimeout The timeout for waiting until the cluster is unblocked. * @return A CompletableFuture that completes with the retrieved index UUID. * @throws ElasticsearchException If an error occurs while retrieving the index UUID. */ private static CompletableFuture asyncGetIndexUUID( final NodeClient client, final String concreteIndexName, - final ExecutorService executorService + final ExecutorService executorService, + TimeValue masterTimeout ) { return supplyAsyncTask( () -> client.admin() .indices() - .prepareGetIndex() + .prepareGetIndex(masterTimeout) .setIndices(concreteIndexName) .get(GET_INDEX_UUID_TIMEOUT) .getSetting(concreteIndexName, IndexMetadata.SETTING_INDEX_UUID), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 618489abd687e..db4fce8a72cb9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -449,14 +449,18 @@ private ClusterHealthStatus ensureColor( } protected final Index resolveLeaderIndex(String index) { - GetIndexResponse getIndexResponse = leaderClient().admin().indices().prepareGetIndex().setIndices(index).get(); + GetIndexResponse getIndexResponse = leaderClient().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); String uuid = getIndexResponse.getSettings().get(index).get(IndexMetadata.SETTING_INDEX_UUID); return new Index(index, uuid); } protected final Index resolveFollowerIndex(String index) { - GetIndexResponse getIndexResponse = followerClient().admin().indices().prepareGetIndex().setIndices(index).get(); + GetIndexResponse getIndexResponse = followerClient().admin() + .indices() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) + .setIndices(index) + .get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); String uuid = getIndexResponse.getSettings().get(index).get(IndexMetadata.SETTING_INDEX_UUID); return new Index(index, uuid); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java index ab3234f6d3d73..041be4ea40e6d 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java @@ -72,7 +72,7 @@ public void testDefaultIndexAllocateToContent() { indicesAdmin().prepareCreate(index).setWaitForActiveShards(0).get(); - Settings idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(index); + Settings 
idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(index); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_CONTENT)); // index should be red @@ -248,7 +248,7 @@ public void testDesiredNodesAreTakenIntoAccountInAutoExpandReplicas() throws Exc ) .get(); - var replicas = indicesAdmin().prepareGetIndex() + var replicas = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) .setIndices(index) .get() .getSetting(index, INDEX_NUMBER_OF_REPLICAS_SETTING.getKey()); @@ -261,7 +261,7 @@ public void testDesiredNodesAreTakenIntoAccountInAutoExpandReplicas() throws Exc updateDesiredNodes(desiredNodesWithoutColdTier); assertBusy(() -> { - var newReplicaCount = indicesAdmin().prepareGetIndex() + var newReplicaCount = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) .setIndices(index) .get() .getSetting(index, INDEX_NUMBER_OF_REPLICAS_SETTING.getKey()); @@ -280,7 +280,7 @@ public void testOverrideDefaultAllocation() { .setSettings(Settings.builder().put(DataTier.TIER_PREFERENCE, DataTier.DATA_WARM)) .get(); - Settings idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(index); + Settings idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(index); assertThat(idxSettings.get(DataTier.TIER_PREFERENCE), equalTo(DataTier.DATA_WARM)); // index should be yellow @@ -297,7 +297,7 @@ public void testRequestSettingOverridden() { .setSettings(Settings.builder().putNull(DataTier.TIER_PREFERENCE)) // will be overridden to data_content .get(); - Settings idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(index); + Settings idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(index); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo("data_content")); // index should be yellow @@ -333,7 +333,11 @@ public void testShrinkStaysOnTier() { ensureGreen(index + "-shrunk"); - Settings idxSettings = indicesAdmin().prepareGetIndex().addIndices(index + "-shrunk").get().getSettings().get(index + "-shrunk"); + Settings idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(index + "-shrunk") + .get() + .getSettings() + .get(index + "-shrunk"); // It should inherit the setting of its originator assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_WARM)); @@ -353,7 +357,7 @@ public void testTemplateOverridden() { indicesAdmin().prepareCreate(index).setWaitForActiveShards(0).get(); - Settings idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(index); + Settings idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(index); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo("data_content")); // index should be yellow diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index a38170d87f9a1..873a5b4587e0e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -28,7 +28,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import 
org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.license.internal.MutableLicenseService; import org.elasticsearch.license.internal.TrialLicenseVersion; @@ -65,7 +64,6 @@ public class ClusterStateLicenseService extends AbstractLifecycleComponent private final Settings settings; private final ClusterService clusterService; - private final FeatureService featureService; /** * The xpack feature state to update when license changes are made. @@ -104,12 +102,10 @@ public ClusterStateLicenseService( ThreadPool threadPool, ClusterService clusterService, Clock clock, - XPackLicenseState xPacklicenseState, - FeatureService featureService + XPackLicenseState xPacklicenseState ) { this.settings = settings; this.clusterService = clusterService; - this.featureService = featureService; this.startTrialTaskQueue = clusterService.createTaskQueue( "license-service-start-trial", Priority.NORMAL, @@ -344,7 +340,7 @@ public void startTrialLicense(PostStartTrialRequest request, final ActionListene } startTrialTaskQueue.submitTask( StartTrialClusterTask.TASK_SOURCE, - new StartTrialClusterTask(logger, clusterService.getClusterName().value(), clock, featureService, request, listener), + new StartTrialClusterTask(logger, clusterService.getClusterName().value(), clock, request, listener), null // TODO should pass in request.masterNodeTimeout() here ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index 1d6f53c9aa23e..d3347f3432a47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -39,8 +38,6 @@ */ public class License implements ToXContentObject { - public static final NodeFeature INDEPENDENT_TRIAL_VERSION_FEATURE = new NodeFeature("license-trial-independent-version", true); - public enum LicenseType { BASIC, STANDARD, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 22f4de105cb2d..bec2ee98b1757 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.license.internal.TrialLicenseVersion; import org.elasticsearch.xpack.core.XPackPlugin; @@ -41,13 +40,11 @@ public class StartTrialClusterTask implements ClusterStateTaskListener { private final PostStartTrialRequest request; private final ActionListener listener; private final Clock clock; - private final FeatureService featureService; StartTrialClusterTask( Logger logger, String clusterName, Clock clock, - FeatureService featureService, 
PostStartTrialRequest request, ActionListener listener ) { @@ -56,7 +53,6 @@ public class StartTrialClusterTask implements ClusterStateTaskListener { this.request = request; this.listener = listener; this.clock = clock; - this.featureService = featureService; } private LicensesMetadata execute( @@ -65,9 +61,6 @@ private LicensesMetadata execute( ClusterStateTaskExecutor.TaskContext taskContext ) { assert taskContext.getTask() == this; - if (featureService.clusterHasFeature(state, License.INDEPENDENT_TRIAL_VERSION_FEATURE) == false) { - throw new IllegalStateException("Please ensure all nodes are up to date before starting your trial"); - } final var listener = ActionListener.runBefore(this.listener, () -> { logger.debug("started self generated trial license: {}", currentLicensesMetadata); }); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java index 4fa8332df9fb5..5ddfe365a32de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java @@ -27,11 +27,8 @@ public class TrialLicenseVersion implements ToXContentFragment, Writeable { // generic Elasticsearch version. While it's derived from the Elasticsearch version formula for BWC, it is independent of it going // forward. When we want users to be able to start a new trial, increment this number. // Pkg-private for testing only. - static final int TRIAL_VERSION_CUTOVER = 8_12_00_99; - public static final TrialLicenseVersion CURRENT = new TrialLicenseVersion(TRIAL_VERSION_CUTOVER); - - // The most recently released major version when we cut over. Here for maintaining BWC behavior. 
- static final int TRIAL_VERSION_CUTOVER_MAJOR = 8; + static final int CURRENT_TRIAL_VERSION = 9_00_00_00; + public static final TrialLicenseVersion CURRENT = new TrialLicenseVersion(CURRENT_TRIAL_VERSION); private final int trialVersion; @@ -84,10 +81,6 @@ int asInt() { public boolean ableToStartNewTrial() { assert trialVersion <= CURRENT.trialVersion : "trial version [" + trialVersion + "] cannot be greater than CURRENT [" + CURRENT.trialVersion + "]"; - if (trialVersion < TRIAL_VERSION_CUTOVER) { - int sinceMajorVersion = trialVersion / 1_000_000; // integer division is intentional - return sinceMajorVersion < TRIAL_VERSION_CUTOVER_MAJOR; - } return trialVersion != CURRENT.trialVersion; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java index 42f66c6ca6ee8..42824a553d2bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java @@ -9,8 +9,6 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.license.License; -import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; import java.util.Set; @@ -18,16 +16,8 @@ * Provides the XPack features that this version of the code supports */ public class XPackFeatures implements FeatureSpecification { - public static final NodeFeature LOGSDB_TELEMETRY = new NodeFeature("logsdb_telemetry", true); - public static final NodeFeature LOGSDB_TELMETRY_STATS = new NodeFeature("logsdb_telemetry_stats", true); - @Override public Set getFeatures() { - return Set.of( - NodesDataTiersUsageTransportAction.LOCALLY_PRECALCULATED_STATS_FEATURE, // Added in 8.12 - License.INDEPENDENT_TRIAL_VERSION_FEATURE, // 8.14.0 - LOGSDB_TELEMETRY, - LOGSDB_TELMETRY_STATS - ); + return Set.of(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index f79a3fbf124b1..cf5ebc8adc56d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -331,8 +331,7 @@ public Collection createComponents(PluginServices services) { services.threadPool(), services.clusterService(), getClock(), - getLicenseState(), - services.featureService() + getLicenseState() ); setLicenseService(licenseService); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index 4c6c18db367da..3aeefa3afb796 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -263,12 +263,7 @@ public static AutoFollowPattern readFrom(StreamInput in) throws IOException { final String remoteCluster = in.readString(); final List leaderIndexPatterns = in.readStringCollectionAsList(); final String followIndexPattern = in.readOptionalString(); - final Settings settings; - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings = Settings.readSettingsFromStream(in); - } else { - settings = Settings.EMPTY; - } + final Settings settings = 
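With the 8.x cutover branch deleted, trial eligibility above reduces to a single comparison against CURRENT: a new trial is allowed exactly when the recorded trial version differs from it. A behavioral sketch, assuming the int constructor is reachable from the test package:

TrialLicenseVersion previousTrial = new TrialLicenseVersion(8_12_00_99); // trial last started on an 8.x release
assert previousTrial.ableToStartNewTrial();                              // differs from 9_00_00_00, so a new trial is allowed
assert TrialLicenseVersion.CURRENT.ableToStartNewTrial() == false;       // already on the current trial version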
Settings.readSettingsFromStream(in); return new AutoFollowPattern(remoteCluster, leaderIndexPatterns, followIndexPattern, settings, in); } @@ -345,9 +340,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings.writeTo(out); - } + settings.writeTo(out); super.writeTo(out); out.writeBoolean(active); if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index dcee7274632eb..07b18f7dc4f91 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -189,9 +189,7 @@ public Request(StreamInput in) throws IOException { remoteCluster = in.readString(); leaderIndexPatterns = in.readStringCollectionAsList(); followIndexNamePattern = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings = Settings.readSettingsFromStream(in); - } + settings = Settings.readSettingsFromStream(in); parameters = new FollowParameters(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { leaderIndexExclusionPatterns = in.readStringCollectionAsList(); @@ -205,9 +203,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings.writeTo(out); - } + settings.writeTo(out); parameters.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { out.writeStringCollection(leaderIndexExclusionPatterns); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 82941c440484d..d902b54dfbdb4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -194,9 +194,7 @@ public Request(StreamInput in) throws IOException { this.remoteCluster = in.readString(); this.leaderIndex = in.readString(); this.followerIndex = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - this.settings = Settings.readSettingsFromStream(in); - } + this.settings = Settings.readSettingsFromStream(in); this.parameters = new FollowParameters(in); waitForActiveShards(ActiveShardCount.readFrom(in)); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { @@ -210,9 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeString(leaderIndex); out.writeString(followerIndex); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings.writeTo(out); - } + settings.writeTo(out); parameters.writeTo(out); waitForActiveShards.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java index f8cb9b913b4ae..379b07d9b9a77 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java @@ -42,7 +42,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java index a33dd7dff3469..48002e6ed41fc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java @@ -45,7 +45,7 @@ public DataTiersFeatureSetUsage(Map tierStats) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_10_0; + return TransportVersions.ZERO; } public Map getTierStats() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java index 9f265f86fae21..e859d81036669 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; @@ -22,7 +21,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.NodeIndicesStats; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -46,7 +44,6 @@ public class DataTiersUsageTransportAction extends XPackUsageFeatureTransportAction { private final Client client; - private final FeatureService featureService; @Inject public DataTiersUsageTransportAction( @@ -55,8 +52,7 @@ public DataTiersUsageTransportAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, - FeatureService featureService + Client client ) { super( XPackUsageFeatureAction.DATA_TIERS.name(), @@ -67,7 +63,6 @@ public DataTiersUsageTransportAction( indexNameExpressionResolver ); this.client = client; - this.featureService = featureService; } @Override @@ -77,42 +72,22 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - if (featureService.clusterHasFeature(state, 
NodesDataTiersUsageTransportAction.LOCALLY_PRECALCULATED_STATS_FEATURE)) { - new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() - .cluster() - .execute( - NodesDataTiersUsageTransportAction.TYPE, - new NodesDataTiersUsageTransportAction.NodesRequest(), - listener.delegateFailureAndWrap((delegate, response) -> { - // Generate tier specific stats for the nodes and indices - delegate.onResponse( - new XPackUsageFeatureResponse( - new DataTiersFeatureSetUsage( - aggregateStats(response.getNodes(), getIndicesGroupedByTier(state, response.getNodes())) - ) - ) - ); - }) - ); - } else { - new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() - .cluster() - .prepareNodesStats() - .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store)) - .execute(listener.delegateFailureAndWrap((delegate, nodesStatsResponse) -> { - List response = nodesStatsResponse.getNodes() - .stream() - .map( - nodeStats -> new NodeDataTiersUsage(nodeStats.getNode(), precalculateLocalStatsFromNodeStats(nodeStats, state)) - ) - .toList(); + new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() + .cluster() + .execute( + NodesDataTiersUsageTransportAction.TYPE, + new NodesDataTiersUsageTransportAction.NodesRequest(), + listener.delegateFailureAndWrap((delegate, response) -> { + // Generate tier specific stats for the nodes and indices delegate.onResponse( new XPackUsageFeatureResponse( - new DataTiersFeatureSetUsage(aggregateStats(response, getIndicesGroupedByTier(state, response))) + new DataTiersFeatureSetUsage( + aggregateStats(response.getNodes(), getIndicesGroupedByTier(state, response.getNodes())) + ) ) ); - })); - } + }) + ); } // Visible for testing diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java index 39cbb9788327b..99bb926b0a3c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.indices.IndicesService; @@ -58,7 +57,6 @@ public class NodesDataTiersUsageTransportAction extends TransportNodesAction< Void> { public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/data_tier_usage"); - public static final NodeFeature LOCALLY_PRECALCULATED_STATS_FEATURE = new NodeFeature("usage.data_tiers.precalculate_stats", true); private static final CommonStatsFlags STATS_FLAGS = new CommonStatsFlags().clear() .set(CommonStatsFlags.Flag.Docs, true) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java index 0edbda79ed975..96742c1e5e57f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java @@ -54,7 
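The data-tiers changes remove the `LOCALLY_PRECALCULATED_STATS_FEATURE` node feature together with the fallback path that recomputed usage from a raw nodes-stats call: every supported node now precalculates its tier stats, so the master unconditionally fans out `NodesDataTiersUsageTransportAction`. The same assumption drives the `getMinimalSupportedVersion()` edits throughout this section, where an explicit 7.x floor is replaced by `TransportVersions.ZERO`, used in these hunks as the "no explicit floor" value:

    @Override
    public TransportVersion getMinimalSupportedVersion() {
        // The old floors (V_7_9_0, V_7_10_0, ...) sit below the oldest peer this
        // branch can still talk to, so they no longer constrain anything.
        return TransportVersions.ZERO;
    }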
+54,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java index feb3a2e3191ff..c6d70543d89ed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java @@ -71,7 +71,7 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } public static class Builder implements InferenceConfigUpdate.Builder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java index 34d3b1c1e38f5..fdd8735bb2454 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java @@ -55,7 +55,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java index d7d0320b602b4..70a5e0c7a3ce2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java @@ -34,7 +34,7 @@ public SearchableSnapshotFeatureSetUsage(StreamInput input) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 9704335776f11..d0d5e463f9652 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -596,6 +596,8 @@ static Map getSSLSettingsMap(Settings settings) { sslSettingsMap.put(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX, settings.getByPrefix(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX)); sslSettingsMap.put(XPackSettings.TRANSPORT_SSL_PREFIX, settings.getByPrefix(XPackSettings.TRANSPORT_SSL_PREFIX)); sslSettingsMap.putAll(getTransportProfileSSLSettings(settings)); + // Mount Elastic Inference Service (part of the Inference plugin) configuration + sslSettingsMap.put("xpack.inference.elastic.http.ssl", settings.getByPrefix("xpack.inference.elastic.http.ssl.")); // Only build remote cluster server SSL if the port is enabled if 
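The `SSLService` hunk registers one more prefix in the SSL settings map so the Elastic Inference Service HTTP client gets its own managed SSL context, and the new entitlement policy grants the Apache HTTP client the outbound `connect` permission it needs. A hypothetical consumer would resolve the context by that same prefix; this sketch assumes an `sslService` instance and the context-by-name lookup exercised by the `SSLServiceTests` change later in this section:

    // Context name matches the prefix added to getSSLSettingsMap(...).
    SslConfiguration inferenceSsl = sslService.getSSLConfiguration("xpack.inference.elastic.http.ssl");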
(REMOTE_CLUSTER_SERVER_ENABLED.get(settings)) { sslSettingsMap.put(XPackSettings.REMOTE_CLUSTER_SERVER_SSL_PREFIX, getRemoteClusterServerSslSettings(settings)); diff --git a/x-pack/plugin/core/src/main/plugin-metadata/entitlement-policy.yaml b/x-pack/plugin/core/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..8983dd6663e65 --- /dev/null +++ b/x-pack/plugin/core/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,4 @@ +org.apache.httpcomponents.httpclient: + - network: + actions: + - connect # For SamlRealm diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java index a3a12792df4aa..a85656d8e32bd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -27,7 +26,6 @@ import org.junit.After; import org.junit.Before; -import java.util.List; import java.util.stream.Stream; import static java.util.Collections.emptySet; @@ -66,14 +64,7 @@ protected void setInitialState(License license, XPackLicenseState licenseState, protected void setInitialState(License license, XPackLicenseState licenseState, Settings settings, String selfGeneratedType) { licenseType = selfGeneratedType; settings = Settings.builder().put(settings).put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), licenseType).build(); - licenseService = new ClusterStateLicenseService( - settings, - threadPool, - clusterService, - clock, - licenseState, - new FeatureService(List.of()) - ); + licenseService = new ClusterStateLicenseService(settings, threadPool, clusterService, clock, licenseState); ClusterState state = mock(ClusterState.class); final ClusterBlocks noBlock = ClusterBlocks.builder().build(); when(state.blocks()).thenReturn(noBlock); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java index aaadecef6021c..61530d1bd77cb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.license.licensor.LicenseSigner; import org.elasticsearch.protocol.xpack.license.LicensesStatus; import org.elasticsearch.protocol.xpack.license.PutLicenseResponse; @@ -91,8 +90,7 @@ public void testLogExpirationWarning() { mock(ThreadPool.class), mockDefaultClusterService(), mock(Clock.class), - mock(XPackLicenseState.class), - new FeatureService(List.of()) + mock(XPackLicenseState.class) ); final String message = service.buildExpirationMessage(time, 
expired).toString(); if (expired) { @@ -187,8 +185,7 @@ public void testStartBasicStartsNewLicenseIfFieldsDifferent() throws Exception { mock(ThreadPool.class), clusterService, clock, - mock(XPackLicenseState.class), - new FeatureService(List.of()) + mock(XPackLicenseState.class) ); verify(clusterService).createTaskQueue(eq("license-service-start-basic"), any(), taskExecutorCaptor.capture()); @@ -280,8 +277,7 @@ private void tryRegisterLicense(Settings baseSettings, License license, Consumer threadPool, clusterService, clock, - licenseState, - new FeatureService(List.of()) + licenseState ); final PutLicenseRequest request = new PutLicenseRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java index d7b308bc51f46..78bd500b55e24 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Before; @@ -18,7 +17,6 @@ import java.time.Clock; import java.time.Instant; import java.time.format.DateTimeFormatter; -import java.util.List; import java.util.Locale; import static org.hamcrest.Matchers.equalTo; @@ -37,8 +35,7 @@ public void setup() throws Exception { mock(ThreadPool.class), mock(ClusterService.class), mock(Clock.class), - mock(XPackLicenseState.class), - new FeatureService(List.of()) + mock(XPackLicenseState.class) ); schedule = service.nextLicenseCheck(license); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java index cc3a3e41af63d..ead7f6ae1b0d8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java @@ -11,8 +11,7 @@ import org.elasticsearch.test.ESTestCase; import static org.elasticsearch.license.internal.TrialLicenseVersion.CURRENT; -import static org.elasticsearch.license.internal.TrialLicenseVersion.TRIAL_VERSION_CUTOVER; -import static org.elasticsearch.license.internal.TrialLicenseVersion.TRIAL_VERSION_CUTOVER_MAJOR; +import static org.elasticsearch.license.internal.TrialLicenseVersion.CURRENT_TRIAL_VERSION; import static org.hamcrest.Matchers.equalTo; public class TrialLicenseVersionTests extends ESTestCase { @@ -20,13 +19,9 @@ public class TrialLicenseVersionTests extends ESTestCase { public void testCanParseAllVersions() { for (var version : Version.getDeclaredVersions(Version.class)) { // Only consider versions before the cut-over; the comparison becomes meaningless after the cut-over point - if (version.onOrBefore(Version.fromId(TRIAL_VERSION_CUTOVER))) { + if (version.onOrBefore(Version.fromId(CURRENT_TRIAL_VERSION))) { TrialLicenseVersion parsedVersion = TrialLicenseVersion.fromXContent(version.toString()); - if (version.major < TRIAL_VERSION_CUTOVER_MAJOR) { - assertTrue(parsedVersion.ableToStartNewTrial()); - } else { - 
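All four license test files change for the same mechanical reason: `ClusterStateLicenseService` no longer takes a `FeatureService`, so the trailing constructor argument disappears everywhere. The updated call shape, with mocks as in the tests:

    ClusterStateLicenseService service = new ClusterStateLicenseService(
        settings,
        mock(ThreadPool.class),
        clusterService,
        mock(Clock.class),
        mock(XPackLicenseState.class) // formerly followed by a FeatureService argument
    );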
assertFalse(parsedVersion.ableToStartNewTrial()); - } + assertTrue(parsedVersion.ableToStartNewTrial()); } } } @@ -38,9 +33,10 @@ public void testRoundTripParsing() { public void testNewTrialAllowed() { assertTrue(new TrialLicenseVersion(randomIntBetween(7_00_00_00, 7_99_99_99)).ableToStartNewTrial()); + assertTrue(new TrialLicenseVersion(randomIntBetween(8_00_00_00, 8_99_99_99)).ableToStartNewTrial()); assertFalse(new TrialLicenseVersion(CURRENT.asInt()).ableToStartNewTrial()); - assertFalse(new TrialLicenseVersion(randomIntBetween(8_00_00_00, TRIAL_VERSION_CUTOVER)).ableToStartNewTrial()); - final int trialVersion = randomIntBetween(TRIAL_VERSION_CUTOVER, CURRENT.asInt()); + assertFalse(new TrialLicenseVersion(randomIntBetween(9_00_00_00, CURRENT_TRIAL_VERSION)).ableToStartNewTrial()); + final int trialVersion = randomIntBetween(CURRENT_TRIAL_VERSION, CURRENT.asInt()); if (trialVersion < CURRENT.asInt()) { assertTrue(new TrialLicenseVersion(trialVersion).ableToStartNewTrial()); } else { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 1f2c89c473a62..d50f7bb27a5df 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -623,7 +623,7 @@ public Map getSnapshotCommitSup } @SuppressWarnings("unchecked") - private <T> List<T> filterPlugins(Class<T> type) { + protected <T> List<T> filterPlugins(Class<T> type) { return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T) p)).collect(Collectors.toList()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java index f1529fafaaffe..18f9ad508eef3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java @@ -131,7 +131,10 @@ public void testAutoCreateIndex() throws Exception { } private void assertSettings() { - GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices(index)).actionGet(); + GetIndexResponse getIndexResponse = client().admin() + .indices() + .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)) + .actionGet(); Settings settings = getIndexResponse.getSettings().get(index); Settings expected = AsyncTaskIndexService.settings(); assertThat(expected, is(settings.filter(expected::hasValue))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java index 299279ee13f1b..2781aa9d18c64 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java @@ -33,7 +33,7 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase { - private static final TransportVersion MIN_EXPECTED_VERSION = TransportVersions.V_7_11_0; + private
static final TransportVersion MIN_EXPECTED_VERSION = TransportVersions.V_8_11_X; private static final String REASON = "some reason"; private Context context; @@ -41,9 +41,9 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase { @Before public void setUpMocks() { context = spy(new Context(null, null, null, null, null, null, null, null, null, null)); - doReturn(TransportVersions.V_7_10_0).when(context).getRemoteClusterVersion("cluster-A"); - doReturn(TransportVersions.V_7_11_0).when(context).getRemoteClusterVersion("cluster-B"); - doReturn(TransportVersions.V_7_12_0).when(context).getRemoteClusterVersion("cluster-C"); + doReturn(TransportVersions.V_8_10_X).when(context).getRemoteClusterVersion("cluster-A"); + doReturn(TransportVersions.V_8_11_X).when(context).getRemoteClusterVersion("cluster-B"); + doReturn(TransportVersions.V_8_12_0).when(context).getRemoteClusterVersion("cluster-C"); } public void testGetters() { @@ -82,8 +82,8 @@ public void testValidate_OneRemoteClusterVersionTooLow() { ctx -> assertThat( ctx.getValidationException().validationErrors(), contains( - "remote clusters are expected to run at least version [7.11.0] (reason: [some reason]), " - + "but the following clusters were too old: [cluster-A (7.10.0)]" + "remote clusters are expected to run at least version [8.11.0-8.11.4] (reason: [some reason]), " + + "but the following clusters were too old: [cluster-A (8.10.0-8.10.4)]" ) ) ) @@ -93,15 +93,15 @@ public void testValidate_OneRemoteClusterVersionTooLow() { public void testValidate_TwoRemoteClusterVersionsTooLow() { doReturn(new HashSet<>(Arrays.asList("cluster-A", "cluster-B", "cluster-C"))).when(context).getRegisteredRemoteClusterNames(); doReturn(new TreeSet<>(Arrays.asList("cluster-A:dummy", "cluster-B:dummy", "cluster-C:dummy"))).when(context).resolveRemoteSource(); - SourceDestValidation validation = new RemoteClusterMinimumVersionValidation(TransportVersions.V_7_12_0, REASON); + SourceDestValidation validation = new RemoteClusterMinimumVersionValidation(TransportVersions.V_8_12_0, REASON); validation.validate( context, ActionTestUtils.assertNoFailureListener( ctx -> assertThat( ctx.getValidationException().validationErrors(), contains( - "remote clusters are expected to run at least version [7.12.0] (reason: [some reason]), " - + "but the following clusters were too old: [cluster-A (7.10.0), cluster-B (7.11.0)]" + "remote clusters are expected to run at least version [8.12.0] (reason: [some reason]), " + + "but the following clusters were too old: [cluster-A (8.10.0-8.10.4), cluster-B (8.11.0-8.11.4)]" ) ) ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java index 120c2a6dbc5e7..c85d9a0561417 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java @@ -183,7 +183,7 @@ public static UnifiedCompletionRequest randomUnifiedCompletionRequest() { return new UnifiedCompletionRequest( randomList(5, UnifiedCompletionRequestTests::randomMessage), randomAlphaOfLengthOrNull(10), - randomPositiveLongOrNull(), + randomNonNegativeLongOrNull(), randomStopOrNull(), randomFloatOrNull(), randomToolChoiceOrNull(), diff --git 
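With the 7.x transport constants gone, `RemoteClusterMinimumVersionValidationTests` re-bases its fixtures on 8.x versions; the expected messages now render patch ranges such as [8.11.0-8.11.4] because `V_8_11_X` spans a whole patch series. The validation call itself keeps its shape (sketch; `context` and `listener` as set up in the test):

    // Flags any registered remote cluster whose transport version is below the floor.
    SourceDestValidation validation =
        new RemoteClusterMinimumVersionValidation(TransportVersions.V_8_12_0, "some reason");
    validation.validate(context, listener);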
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index 9663e41a647a8..bfac286bc3c35 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -614,7 +614,8 @@ public void testGetConfigurationByContextName() throws Exception { "xpack.security.authc.realms.ldap.realm1.ssl", "xpack.security.authc.realms.saml.realm2.ssl", "xpack.monitoring.exporters.mon1.ssl", - "xpack.monitoring.exporters.mon2.ssl" }; + "xpack.monitoring.exporters.mon2.ssl", + "xpack.inference.elastic.http.ssl" }; assumeTrue("Not enough cipher suites are available to support this test", getCipherSuites.length >= contextNames.length); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index 50f3ab6bf9a08..b0c99bf8a0cea 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -55,6 +55,13 @@ } } }, + "os": { + "properties": { + "type": { + "type": "keyword" + } + } + }, "profiling": { "properties": { "project.id": { diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java index 365f31f8e5fe1..d8b27cd52724b 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java @@ -242,7 +242,7 @@ private void assertDocumentsExist(final String nodeName, final String indexName) private void assertIndexExists(final String nodeName, final String indexName) { final GetIndexResponse getIndexResponse = client(nodeName).admin() .indices() - .prepareGetIndex() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) .addIndices(indexName) .addFeatures(GetIndexRequest.Feature.values()) .get(); @@ -255,7 +255,7 @@ private void assertIndexDoesNotExist(final String nodeName, final String indexNa "Index [" + indexName + "] was not deleted", () -> client(nodeName).admin() .indices() - .prepareGetIndex() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) .addIndices(indexName) .addFeatures(GetIndexRequest.Feature.values()) .get() diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java index 5fba98b765a6b..aa9f6f804dab4 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java @@ -203,7 +203,7 @@ private void assertTargetIndex(final InternalTestCluster cluster, final String t final GetIndexResponse getIndexResponse = cluster.client() .admin() .indices() - .getIndex(new 
GetIndexRequest().indices(targetIndex)) + .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(targetIndex)) .actionGet(); assertEquals(1, getIndexResponse.indices().length); assertResponse( diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 31b415270915b..ce9b60938526a 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -449,7 +449,9 @@ public void testCopyIndexSettings() throws IOException { prepareSourceIndex(sourceIndex, true); downsample(sourceIndex, downsampleIndex, config); - GetIndexResponse indexSettingsResp = indicesAdmin().prepareGetIndex().addIndices(sourceIndex, downsampleIndex).get(); + GetIndexResponse indexSettingsResp = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(sourceIndex, downsampleIndex) + .get(); assertDownsampleIndexSettings(sourceIndex, downsampleIndex, indexSettingsResp); for (String key : settings.keySet()) { if (LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey().equals(key)) { @@ -605,7 +607,7 @@ public void onFailure(Exception e) { ); assertBusy(() -> { try { - assertEquals(indicesAdmin().prepareGetIndex().addIndices(downsampleIndex).get().getIndices().length, 1); + assertEquals(indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(downsampleIndex).get().getIndices().length, 1); } catch (IndexNotFoundException e) { fail("downsample index has not been created"); } @@ -1211,7 +1213,9 @@ private void assertDownsampleIndex(String sourceIndex, String downsampleIndex, D assertDownsampleIndexAggregations(sourceIndex, downsampleIndex, config, metricFields, labelFields); - GetIndexResponse indexSettingsResp = indicesAdmin().prepareGetIndex().addIndices(sourceIndex, downsampleIndex).get(); + GetIndexResponse indexSettingsResp = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(sourceIndex, downsampleIndex) + .get(); assertDownsampleIndexSettings(sourceIndex, downsampleIndex, indexSettingsResp); Map> mappings = (Map>) indexSettingsResp.getMappings() diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index 37498bb9d54ab..d8276ef0d5118 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -147,7 +147,7 @@ public void run(ActionListener listener) { // Collect the source index information final String[] sourceIndices = policy.getIndices().toArray(new String[0]); logger.debug("Policy [{}]: Checking source indices [{}]", policyName, sourceIndices); - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(sourceIndices); + GetIndexRequest getIndexRequest = new GetIndexRequest(ENRICH_MASTER_REQUEST_TIMEOUT).indices(sourceIndices); // This call does not set the origin to ensure that the user executing the policy has permission to access the source index client.admin().indices().getIndex(getIndexRequest, l); }) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyAction.java 
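A change that recurs across the downsample, enrich, and async-search tests in this section: `GetIndexRequest` (and the `prepareGetIndex` builder) now require an explicit master-node timeout, so tests thread through `TEST_REQUEST_TIMEOUT` and production code passes a real value such as `request.masterNodeTimeout()` or `ENRICH_MASTER_REQUEST_TIMEOUT`. The new call shape (the client variable and the 30s value are illustrative):

    // The no-argument constructor is gone; the timeout is a required TimeValue.
    GetIndexRequest getIndex = new GetIndexRequest(TimeValue.timeValueSeconds(30)).indices("my-index-*");
    client.admin().indices().getIndex(getIndex, listener);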
b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyAction.java index e88d830111dea..b69a1731ed88e 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyAction.java @@ -123,8 +123,9 @@ protected void masterOperation( } try { - final GetIndexRequest indices = new GetIndexRequest().indices(EnrichPolicy.getBaseName(policyName) + "-*") - .indicesOptions(IndicesOptions.lenientExpand()); + final GetIndexRequest indices = new GetIndexRequest(request.masterNodeTimeout()).indices( + EnrichPolicy.getBaseName(policyName) + "-*" + ).indicesOptions(IndicesOptions.lenientExpand()); String[] concreteIndices = indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(state, indices); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java index 0c19c2867a489..4b0aa73b95619 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java @@ -137,7 +137,10 @@ protected void scheduleNext() { } private void assertEnrichIndicesExist(Set activeIndices) { - GetIndexResponse indices = client().admin().indices().getIndex(new GetIndexRequest().indices(".enrich-*")).actionGet(); + GetIndexResponse indices = client().admin() + .indices() + .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(".enrich-*")) + .actionGet(); assertThat(indices.indices().length, is(equalTo(activeIndices.size()))); for (String index : indices.indices()) { assertThat(activeIndices.contains(index), is(true)); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index 65d53d3adabe7..3a27d1a3fcb1a 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -346,7 +346,8 @@ private void testNumberRangeMatchType(String rangeType) throws Exception { } private GetIndexResponse getGetIndexResponseAndCheck(String createdEnrichIndex) { - GetIndexResponse enrichIndex = indicesAdmin().getIndex(new GetIndexRequest().indices(".enrich-test1")).actionGet(); + GetIndexResponse enrichIndex = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(".enrich-test1")) + .actionGet(); assertThat(enrichIndex.getIndices().length, equalTo(1)); assertThat(enrichIndex.getIndices()[0], equalTo(createdEnrichIndex)); Settings settings = enrichIndex.getSettings().get(createdEnrichIndex); @@ -363,7 +364,8 @@ public void testRunnerRangeTypeWithIpRange() throws Exception { .actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - GetIndexResponse sourceIndex = indicesAdmin().getIndex(new GetIndexRequest().indices(sourceIndexName)).actionGet(); + GetIndexResponse sourceIndex = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(sourceIndexName)) + .actionGet(); // Validate Mapping Map sourceIndexMapping = 
sourceIndex.getMappings().get(sourceIndexName).sourceAsMap(); Map sourceIndexProperties = (Map) sourceIndexMapping.get("properties"); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java index 568f1074d1a5f..fd23b121810ec 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java @@ -142,7 +142,9 @@ public void testDeleteIsNotLocked() throws Exception { createIndex(EnrichPolicy.getIndexName(name, 1001)); createIndex(EnrichPolicy.getIndexName(name, 1002)); - indicesAdmin().prepareGetIndex().setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1002)).get(); + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1002)) + .get(); final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); @@ -169,7 +171,8 @@ public void onFailure(final Exception e) { expectThrows( IndexNotFoundException.class, - indicesAdmin().prepareGetIndex().setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1001)) + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) + .setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1001)) ); if (destructiveRequiresName) { @@ -307,7 +310,7 @@ public void onFailure(final Exception e) { assertNotNull(EnrichStore.getPolicy(otherName, clusterService.state())); // and the index associated with the other index should be unaffected - indicesAdmin().prepareGetIndex().setIndices(EnrichPolicy.getIndexName(otherName, 1001)).get(); + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(EnrichPolicy.getIndexName(otherName, 1001)).get(); } } } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml index c54d764fc4761..3a84bda7042a2 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml @@ -280,9 +280,7 @@ setup: --- "List Connectors - Soft deleted connectors / no deleted": - - requires: - cluster_features: ["connector_soft_deletes"] - reason: Soft deletes were introduced in 9.0 release + - do: connector.list: @@ -293,9 +291,7 @@ setup: --- "List Connectors - Single soft deleted connector": - - requires: - cluster_features: ["connector_soft_deletes"] - reason: Soft deletes were introduced in 9.0 release + - do: connector.delete: @@ -312,11 +308,91 @@ setup: - match: { count: 3 } + +--- +"List Connectors - Single hard deleted connector": + + + - do: + connector.delete: + connector_id: connector-a + hard: true + + - do: + connector.list: {} + + - match: { count: 2 } + + - do: + connector.list: + include_deleted: true + + - match: { count: 2 } + + +--- +"List Connectors - All hard deleted connectors": + + + - do: + connector.delete: + connector_id: connector-a + hard: true + + - do: + 
connector.delete: + connector_id: connector-b + hard: true + + - do: + connector.delete: + connector_id: connector-c + hard: true + + - do: + connector.list: {} + + - match: { count: 0 } + + - do: + connector.list: + include_deleted: true + + - match: { count: 0 } + +--- +"List Connectors - 2 hard deleted connectors, 1 soft deleted": + + + - do: + connector.delete: + connector_id: connector-a + hard: false + + - do: + connector.delete: + connector_id: connector-b + hard: true + + - do: + connector.delete: + connector_id: connector-c + hard: true + + - do: + connector.list: {} + + - match: { count: 0 } + + - do: + connector.list: + include_deleted: true + + - match: { count: 1 } + --- "List Connectors - Soft deleted connectors": - - requires: - cluster_features: ["connector_soft_deletes"] - reason: Soft deletes were introduced in 9.0 release + - do: connector.delete: @@ -353,9 +429,7 @@ setup: --- "List Connectors - Soft deleted with from": - - requires: - cluster_features: ["connector_soft_deletes"] - reason: Soft deletes were introduced in 9.0 release + - do: connector.delete: @@ -387,9 +461,7 @@ setup: --- "List Connector - Soft deleted with size": - - requires: - cluster_features: ["connector_soft_deletes"] - reason: Soft deletes were introduced in 9.0 release + - do: connector.delete: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/30_connector_delete.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/30_connector_delete.yml index aa820732be2e2..6cb0be2a737ef 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/30_connector_delete.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/30_connector_delete.yml @@ -27,6 +27,31 @@ setup: connector_id: test-connector-to-delete +--- +"Delete Connector - Hard Delete": + - do: + connector.put: + connector_id: test-connector-hard-delete + body: + index_name: search-2-test + name: my-hard-delete-connector + language: en + is_native: false + service_type: super-connector + + - do: + connector.delete: + connector_id: test-connector-hard-delete + hard: true + + - match: { acknowledged: true } + + - do: + catch: "missing" + connector.get: + connector_id: test-connector-hard-delete + include_deleted: true + --- "Delete Connector - deletes associated sync jobs": @@ -107,12 +132,9 @@ setup: connector.delete: connector_id: test-nonexistent-connector - --- "Delete Connector - Supports soft deletes": - - requires: - cluster_features: ["connector_soft_deletes"] - reason: Soft deletes were introduced in 9.0 release + - do: connector.delete: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml index 0b98182b39602..f29deb943d957 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml @@ -306,10 +306,6 @@ teardown: --- 'List query rulesets - include rule types': - - requires: - cluster_features: [ "query_rule_list_types" ] - reason: 'List responses updated in 8.15.5 and 8.16.1' - - do: query_rules.put_ruleset: ruleset_id: 
a-test-query-ruleset-with-lots-of-criteria @@ -389,4 +385,4 @@ teardown: suffix: 1 always: 1 - match: { results.0.rule_type_counts: { pinned: 4, exclude: 1 } } - + diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/70_query_rule_test.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/70_query_rule_test.yml index 016d9f10fe77f..433a0f6705c73 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/70_query_rule_test.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/70_query_rule_test.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: [ "query_rules.test" ] - reason: Introduced in 8.16.0 - - do: query_rules.put_ruleset: ruleset_id: test-ruleset diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml index 7967516c6ad5a..089a078c62207 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/80_query_rules_retriever.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: 'query_rule_retriever_supported' - reason: 'test requires query rule retriever implementation' - - do: indices.create: index: test-index1 diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index ba121f2cf865e..0f5c38e6f75ed 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -9,21 +9,13 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.xpack.application.rules.action.ListQueryRulesetsAction; -import org.elasticsearch.xpack.application.rules.retriever.QueryRuleRetrieverBuilder; import java.util.Set; -import static org.elasticsearch.xpack.application.rules.action.TestQueryRulesetAction.QUERY_RULES_TEST_API; - public class EnterpriseSearchFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of( - QUERY_RULES_TEST_API, - QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED, - ListQueryRulesetsAction.QUERY_RULE_LIST_TYPES - ); + return Set.of(); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index bb80f5fee4ec9..3120124c17523 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -13,6 +13,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.action.DocWriteRequest; +import 
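On the Enterprise Search side, the YAML edits mirror the node-feature cleanup elsewhere in this diff: the `query_rules.test`, `query_rule_retriever_supported`, `query_rule_list_types`, and `connector_soft_deletes` gates are satisfied by every node the suite can run against, so the `requires:` stanzas go away and the feature specification empties out to:

    public class EnterpriseSearchFeatures implements FeatureSpecification {
        @Override
        public Set<NodeFeature> getFeatures() {
            return Set.of(); // nothing left to advertise; the gates are implied by version
        }
    }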
org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; @@ -232,40 +234,71 @@ public void getConnector(String connectorId, boolean includeDeleted, ActionListe } /** - * Soft deletes the {@link Connector} and optionally removes the related instances of {@link ConnectorSyncJob} in the underlying index. + * Deletes the {@link Connector} and optionally removes the related instances of {@link ConnectorSyncJob} in the underlying index. * * @param connectorId The id of the {@link Connector}. + * @param hardDelete If set to true, the {@link Connector} is permanently deleted; otherwise, it is soft-deleted. * @param shouldDeleteSyncJobs The flag indicating if {@link ConnectorSyncJob} should also be deleted. * @param listener The action listener to invoke on response/failure. */ - public void deleteConnector(String connectorId, boolean shouldDeleteSyncJobs, ActionListener listener) { + public void deleteConnector( + String connectorId, + boolean hardDelete, + boolean shouldDeleteSyncJobs, + ActionListener listener + ) { try { - // ensure that if connector is soft-deleted, deleting it again results in 404 - getConnector(connectorId, false, listener.delegateFailure((l, connector) -> { - final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .doc( - new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) - .id(connectorId) - .source(Map.of(Connector.IS_DELETED_FIELD.getPreferredName(), true)) - ); - clientWithOrigin.update(updateRequest, new DelegatingIndexNotFoundActionListener<>(connectorId, l, (ll, updateResponse) -> { - if (updateResponse.getResult() == UpdateResponse.Result.NOT_FOUND) { - ll.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); - return; - } - if (shouldDeleteSyncJobs) { - new ConnectorSyncJobIndexService(clientWithOrigin).deleteAllSyncJobsByConnectorId( - connectorId, - ll.map(r -> updateResponse) + if (hardDelete) { + final DeleteRequest deleteRequest = new DeleteRequest(CONNECTOR_INDEX_NAME).id(connectorId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + clientWithOrigin.delete( + deleteRequest, + new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (l, deleteResponse) -> { + if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + l.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); + return; + } + if (shouldDeleteSyncJobs) { + new ConnectorSyncJobIndexService(clientWithOrigin).deleteAllSyncJobsByConnectorId( + connectorId, + l.map(r -> deleteResponse) + ); + } else { + l.onResponse(deleteResponse); + } + }) + ); + } else { + getConnector(connectorId, false, listener.delegateFailure((l, connector) -> { + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).setRefreshPolicy( + WriteRequest.RefreshPolicy.IMMEDIATE + ) + .doc( + new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) + .id(connectorId) + .source(Map.of(Connector.IS_DELETED_FIELD.getPreferredName(), true)) ); - } else { - ll.onResponse(updateResponse); - } + clientWithOrigin.update( + updateRequest, + new DelegatingIndexNotFoundActionListener<>(connectorId, l, (ll, updateResponse) -> { + if (updateResponse.getResult() == 
UpdateResponse.Result.NOT_FOUND) { + ll.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); + return; + } + if (shouldDeleteSyncJobs) { + new ConnectorSyncJobIndexService(clientWithOrigin).deleteAllSyncJobsByConnectorId( + connectorId, + ll.map(r -> updateResponse) + ); + } else { + ll.onResponse(updateResponse); + } + }) + ); })); - })); + } } catch (Exception e) { listener.onFailure(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java index 5d98f9703ecea..57d940537d176 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java @@ -9,9 +9,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -35,19 +35,16 @@ private DeleteConnectorAction() {/* no instances */} public static class Request extends ConnectorActionRequest implements ToXContentObject { private final String connectorId; + private final boolean hardDelete; private final boolean deleteSyncJobs; private static final ParseField CONNECTOR_ID_FIELD = new ParseField("connector_id"); + private static final ParseField HARD_DELETE_FIELD = new ParseField("hard"); private static final ParseField DELETE_SYNC_JOB_FIELD = new ParseField("delete_sync_jobs"); - public Request(StreamInput in) throws IOException { - super(in); - this.connectorId = in.readString(); - this.deleteSyncJobs = in.readBoolean(); - } - - public Request(String connectorId, boolean deleteSyncJobs) { + public Request(String connectorId, boolean hardDelete, boolean deleteSyncJobs) { this.connectorId = connectorId; + this.hardDelete = hardDelete; this.deleteSyncJobs = deleteSyncJobs; } @@ -66,15 +63,17 @@ public String getConnectorId() { return connectorId; } + public boolean isHardDelete() { + return hardDelete; + } + public boolean shouldDeleteSyncJobs() { return deleteSyncJobs; } @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(connectorId); - out.writeBoolean(deleteSyncJobs); + TransportAction.localOnly(); } @Override @@ -82,18 +81,21 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return deleteSyncJobs == request.deleteSyncJobs && Objects.equals(connectorId, request.connectorId); + return hardDelete == request.hardDelete + && deleteSyncJobs == request.deleteSyncJobs + && Objects.equals(connectorId, request.connectorId); } @Override public int hashCode() { - return Objects.hash(connectorId, deleteSyncJobs); + return Objects.hash(connectorId, hardDelete, deleteSyncJobs); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); 
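`deleteConnector` now branches on the new `hardDelete` flag: `true` issues a `DeleteRequest` that removes the document outright (it no longer appears even with `include_deleted`), while `false` keeps the soft-delete behaviour of flipping `is_deleted` through an `UpdateRequest`; both paths translate a NOT_FOUND result into `ResourceNotFoundException` and optionally cascade into sync-job cleanup. A hypothetical caller (the connector id and `logger` are made up for illustration):

    connectorIndexService.deleteConnector(
        "my-connector-id",
        true,   // hardDelete: permanent removal via DeleteRequest
        false,  // shouldDeleteSyncJobs: leave associated sync jobs in place
        ActionListener.wrap(
            resp -> logger.info("delete result: {}", resp.getResult()),
            e -> logger.warn("connector missing or already deleted", e)
        )
    );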
builder.field(CONNECTOR_ID_FIELD.getPreferredName(), connectorId); + builder.field(HARD_DELETE_FIELD.getPreferredName(), hardDelete); builder.field(DELETE_SYNC_JOB_FIELD.getPreferredName(), deleteSyncJobs); builder.endObject(); return builder; @@ -102,10 +104,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "delete_connector_request", false, - (p) -> new Request((String) p[0], (boolean) p[1]) + (p) -> new Request((String) p[0], (boolean) p[1], (boolean) p[2]) ); static { PARSER.declareString(constructorArg(), CONNECTOR_ID_FIELD); + PARSER.declareBoolean(constructorArg(), HARD_DELETE_FIELD); PARSER.declareBoolean(constructorArg(), DELETE_SYNC_JOB_FIELD); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java index 8d4a6dccd95fe..2adcc1ccafbcd 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java @@ -40,8 +40,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String connectorId = restRequest.param(CONNECTOR_ID_PARAM); boolean shouldDeleteSyncJobs = restRequest.paramAsBoolean("delete_sync_jobs", false); + boolean hardDelete = restRequest.paramAsBoolean("hard", false); - DeleteConnectorAction.Request request = new DeleteConnectorAction.Request(connectorId, shouldDeleteSyncJobs); + DeleteConnectorAction.Request request = new DeleteConnectorAction.Request(connectorId, hardDelete, shouldDeleteSyncJobs); return channel -> client.execute(DeleteConnectorAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java index e534d969fdaaa..5e0e94ece2ae8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -18,26 +18,21 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.application.connector.ConnectorIndexService; -public class TransportDeleteConnectorAction extends HandledTransportAction { +public class TransportDeleteConnectorAction extends TransportAction { protected final ConnectorIndexService connectorIndexService; @Inject public TransportDeleteConnectorAction(TransportService transportService, ActionFilters actionFilters, Client client) { - super( - DeleteConnectorAction.NAME, - 
transportService, - actionFilters, - DeleteConnectorAction.Request::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(DeleteConnectorAction.NAME, actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE); this.connectorIndexService = new ConnectorIndexService(client); } @Override protected void doExecute(Task task, DeleteConnectorAction.Request request, ActionListener listener) { String connectorId = request.getConnectorId(); + boolean hardDelete = request.isHardDelete(); boolean shouldDeleteSyncJobs = request.shouldDeleteSyncJobs(); - connectorIndexService.deleteConnector(connectorId, shouldDeleteSyncJobs, listener.map(v -> AcknowledgedResponse.TRUE)); + connectorIndexService.deleteConnector(connectorId, hardDelete, shouldDeleteSyncJobs, listener.map(v -> AcknowledgedResponse.TRUE)); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java index c85416c5f08c5..11397583ce5b9 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -34,8 +33,6 @@ public class ListQueryRulesetsAction { public static final String NAME = "cluster:admin/xpack/query_rules/list"; public static final ActionType INSTANCE = new ActionType<>(NAME); - public static final NodeFeature QUERY_RULE_LIST_TYPES = new NodeFeature("query_rule_list_types", true); - private ListQueryRulesetsAction() {/* no instances */} public static class Request extends ActionRequest implements ToXContentObject { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetAction.java index b8293ca64cf05..c661bc9467e6e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -35,8 +34,6 @@ public class TestQueryRulesetAction { - public static final NodeFeature QUERY_RULES_TEST_API = new NodeFeature("query_rules.test", true); - // TODO - We'd like to transition this to require less stringent permissions public static final ActionType TYPE = new ActionType<>("cluster:admin/xpack/query_rules/test"); diff --git 
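`DeleteConnectorAction.Request` also stops being wire-serializable: the `StreamInput` constructor is deleted, `writeTo` trips `TransportAction.localOnly()`, and the transport action is downgraded from `HandledTransportAction` (which registers a request reader) to a plain `TransportAction`. This is the standard pattern for actions that only ever run on the coordinating node, and it is why the BWC wire-serialization test for this request is deleted further down; there is no cross-version wire format left to test:

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // The request is executed on the node that received the REST call and never
        // crosses the wire; localOnly() throws if serialization is ever attempted.
        TransportAction.localOnly();
    }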
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java index d63166b01b4df..528204f4132ea 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -9,7 +9,6 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.license.LicenseUtils; @@ -45,7 +44,6 @@ public final class QueryRuleRetrieverBuilder extends CompoundRetrieverBuilder { public static final String NAME = "rule"; - public static final NodeFeature QUERY_RULE_RETRIEVERS_SUPPORTED = new NodeFeature("query_rule_retriever_supported", true); public static final ParseField RULESET_IDS_FIELD = new ParseField("ruleset_ids"); public static final ParseField MATCH_CRITERIA_FIELD = new ParseField("match_criteria"); @@ -76,9 +74,6 @@ public final class QueryRuleRetrieverBuilder extends CompoundRetrieverBuilder connectorIds = new ArrayList<>(); for (int i = 0; i < numConnectors; i++) { @@ -111,11 +111,10 @@ public void testDeleteConnector() throws Exception { } String connectorIdToDelete = connectorIds.get(0); - UpdateResponse resp = awaitDeleteConnector(connectorIdToDelete, false); + DocWriteResponse resp = awaitDeleteConnector(connectorIdToDelete, false, false); assertThat(resp.status(), equalTo(RestStatus.OK)); expectThrows(ResourceNotFoundException.class, () -> awaitGetConnector(connectorIdToDelete)); - - expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnector(connectorIdToDelete, false)); + expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnector(connectorIdToDelete, false, false)); } public void testDeleteConnector_expectSoftDeletion() throws Exception { @@ -130,13 +129,11 @@ public void testDeleteConnector_expectSoftDeletion() throws Exception { } String connectorIdToDelete = connectorIds.get(0); - UpdateResponse resp = awaitDeleteConnector(connectorIdToDelete, false); + DocWriteResponse resp = awaitDeleteConnector(connectorIdToDelete, false, false); assertThat(resp.status(), equalTo(RestStatus.OK)); expectThrows(ResourceNotFoundException.class, () -> awaitGetConnector(connectorIdToDelete)); - - expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnector(connectorIdToDelete, false)); - - Connector softDeletedConnector = awaitGetSoftDeletedConnector(connectorIdToDelete); + expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnector(connectorIdToDelete, false, false)); + Connector softDeletedConnector = awaitGetConnectorIncludeDeleted(connectorIdToDelete); assertThat(softDeletedConnector.getConnectorId(), equalTo(connectorIdToDelete)); assertThat(softDeletedConnector.getServiceType(), equalTo(connectors.get(0).getServiceType())); } @@ -150,27 +147,65 @@ public void testDeleteConnector_expectSoftDeletionMultipleConnectors() throws Ex connectorIds.add(resp.getId()); } - // Delete all of them for (int i = 0; i < numConnectors; i++) { String connectorIdToDelete = connectorIds.get(i); - UpdateResponse resp = awaitDeleteConnector(connectorIdToDelete, false); + DocWriteResponse 
resp = awaitDeleteConnector(connectorIdToDelete, false, false); assertThat(resp.status(), equalTo(RestStatus.OK)); } - // Connectors were deleted from main index for (int i = 0; i < numConnectors; i++) { String connectorId = connectorIds.get(i); expectThrows(ResourceNotFoundException.class, () -> awaitGetConnector(connectorId)); } - // Soft deleted connectors available in system index for (int i = 0; i < numConnectors; i++) { String connectorId = connectorIds.get(i); - Connector softDeletedConnector = awaitGetSoftDeletedConnector(connectorId); + Connector softDeletedConnector = awaitGetConnectorIncludeDeleted(connectorId); assertThat(softDeletedConnector.getConnectorId(), equalTo(connectorId)); } } + public void testDeleteConnector_expectHardDeletionSingle() throws Exception { + int numConnectors = 3; + List connectorIds = new ArrayList<>(); + for (int i = 0; i < numConnectors; i++) { + Connector connector = ConnectorTestUtils.getRandomConnector(); + ConnectorCreateActionResponse resp = awaitCreateConnector(null, connector); + connectorIds.add(resp.getId()); + } + + String connectorIdToDelete = connectorIds.get(0); + DocWriteResponse resp = awaitDeleteConnector(connectorIdToDelete, true, false); + assertThat(resp.status(), equalTo(RestStatus.OK)); + expectThrows(ResourceNotFoundException.class, () -> awaitGetConnector(connectorIdToDelete)); + expectThrows(ResourceNotFoundException.class, () -> awaitGetConnectorIncludeDeleted(connectorIdToDelete)); + expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnector(connectorIdToDelete, true, false)); + } + + public void testDeleteConnector_expectHardDeletionMultipleConnectors() throws Exception { + int numConnectors = 5; + List connectorIds = new ArrayList<>(); + for (int i = 0; i < numConnectors; i++) { + Connector connector = ConnectorTestUtils.getRandomConnector(); + ConnectorCreateActionResponse resp = awaitCreateConnector(null, connector); + connectorIds.add(resp.getId()); + } + + for (int i = 0; i < numConnectors; i++) { + String connectorIdToDelete = connectorIds.get(i); + DocWriteResponse resp = awaitDeleteConnector(connectorIdToDelete, true, false); + assertThat(resp.status(), equalTo(RestStatus.OK)); + } + + for (String connectorId : connectorIds) { + expectThrows(ResourceNotFoundException.class, () -> awaitGetConnector(connectorId)); + } + + for (String connectorId : connectorIds) { + expectThrows(ResourceNotFoundException.class, () -> awaitGetConnectorIncludeDeleted(connectorId)); + } + } + public void testUpdateConnectorConfiguration_FullConfiguration() throws Exception { Connector connector = ConnectorTestUtils.getRandomConnector(); String connectorId = randomUUID(); @@ -949,13 +984,14 @@ public void testUpdateConnectorApiKeyIdOrApiKeySecretId() throws Exception { assertThat(updateApiKeyIdRequest.getApiKeySecretId(), equalTo(indexedConnector.getApiKeySecretId())); } - private UpdateResponse awaitDeleteConnector(String connectorId, boolean deleteConnectorSyncJobs) throws Exception { + private DocWriteResponse awaitDeleteConnector(String connectorId, boolean hardDelete, boolean deleteConnectorSyncJobs) + throws Exception { CountDownLatch latch = new CountDownLatch(1); - final AtomicReference resp = new AtomicReference<>(null); + final AtomicReference resp = new AtomicReference<>(null); final AtomicReference exc = new AtomicReference<>(null); - connectorIndexService.deleteConnector(connectorId, deleteConnectorSyncJobs, new ActionListener<>() { + connectorIndexService.deleteConnector(connectorId, hardDelete, 
deleteConnectorSyncJobs, new ActionListener<>() { @Override - public void onResponse(UpdateResponse deleteResponse) { + public void onResponse(DocWriteResponse deleteResponse) { resp.set(deleteResponse); latch.countDown(); } @@ -1008,7 +1044,7 @@ public void onFailure(Exception e) { return resp.get(); } - private Connector awaitGetSoftDeletedConnector(String connectorId) throws Exception { + private Connector awaitGetConnectorIncludeDeleted(String connectorId) throws Exception { return awaitGetConnector(connectorId, true); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorActionRequestBWCSerializingTests.java deleted file mode 100644 index 5ad7109a6b7c1..0000000000000 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorActionRequestBWCSerializingTests.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.application.connector.action; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractBWCSerializationTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; - -public class DeleteConnectorActionRequestBWCSerializingTests extends AbstractBWCSerializationTestCase { - - @Override - protected Writeable.Reader instanceReader() { - return DeleteConnectorAction.Request::new; - } - - @Override - protected DeleteConnectorAction.Request createTestInstance() { - return new DeleteConnectorAction.Request(randomAlphaOfLengthBetween(1, 10), false); - } - - @Override - protected DeleteConnectorAction.Request mutateInstance(DeleteConnectorAction.Request instance) throws IOException { - return randomValueOtherThan(instance, this::createTestInstance); - } - - @Override - protected DeleteConnectorAction.Request doParseInstance(XContentParser parser) throws IOException { - return DeleteConnectorAction.Request.parse(parser); - } - - @Override - protected DeleteConnectorAction.Request mutateInstanceForVersion(DeleteConnectorAction.Request instance, TransportVersion version) { - return new DeleteConnectorAction.Request(instance.getConnectorId(), instance.shouldDeleteSyncJobs()); - } -} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java index 3081dcf11d95e..3c169d9a76772 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilderTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import 
org.elasticsearch.search.retriever.RetrieverBuilder; @@ -53,10 +54,7 @@ protected QueryRuleRetrieverBuilder createTestInstance() { protected QueryRuleRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { return (QueryRuleRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( parser, - new RetrieverParserContext( - new SearchUsage(), - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED - ) + new RetrieverParserContext(new SearchUsage(), Predicates.never()) ); } diff --git a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle index bc1a44f94d18a..50c7d756f43ea 100644 --- a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle +++ b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle @@ -42,39 +42,36 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster localCluster useCluster remoteCluster systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') - - doFirst { - nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) - } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo("${baseName}-local").map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.rest.remote_cluster', getClusterInfo("${baseName}-remote").map { it.allHttpSocketURI.join(",") }) } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { dependsOn "processTestResources" mustRunAfter("precommit") doFirst { - localCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(localCluster) } } tasks.register("${baseName}#oneThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oldClusterTest" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#twoThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oneThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#fullUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#twoThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } diff --git a/x-pack/plugin/eql/qa/mixed-node/build.gradle b/x-pack/plugin/eql/qa/mixed-node/build.gradle index bbeb439ab6155..b13f42ea533f2 100644 --- a/x-pack/plugin/eql/qa/mixed-node/build.gradle +++ b/x-pack/plugin/eql/qa/mixed-node/build.gradle @@ -39,19 +39,22 @@ buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.0") && mustRunAfter("precommit") classpath = sourceSets.javaRestTest.runtimeClasspath testClassesDirs = sourceSets.javaRestTest.output.classesDirs + def socketsProvider1 = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def socketsProvider2 = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } doFirst { // Getting the endpoints causes a wait for the cluster - println "Endpoints are: ${-> testClusters."${baseName}".allHttpSocketURI.join(",")}" + println "Endpoints are: ${-> socketsProvider1.get()}" println "Upgrading one node to create a mixed cluster" - cluster.get().nextNodeToNextVersion() - - println "Upgrade 
complete, endpoints are: ${-> testClusters.named(baseName).get().allHttpSocketURI.join(",")}" - nonInputProperties.systemProperty('tests.rest.cluster', cluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) + getRegistry().get().nextNodeToNextVersion(cluster) + println "Upgrade complete, endpoints are: ${-> socketsProvider2.get()}" } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + + def bwcEnabled = project.bwc_tests_enabled + onlyIf("BWC tests disabled") { bwcEnabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java index 863f89827207e..87b33b3b0893d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java @@ -246,4 +246,9 @@ public OrdinalBytesRefBlock expand() { public long ramBytesUsed() { return ordinals.ramBytesUsed() + bytes.ramBytesUsed(); } + + @Override + public String toString() { + return getClass().getSimpleName() + "[ordinals=" + ordinals + ", bytes=" + bytes + "]"; + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java index 9c35b5a44d5d3..34c27a5c1fdff 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java @@ -138,7 +138,6 @@ protected Page getCheckedOutput() throws IOException { Page page = null; // emit only one page if (remainingDocs <= 0 && pagesEmitted == 0) { - pagesEmitted++; LongBlock count = null; BooleanBlock seen = null; try { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java index c41c31345df4e..e9f540c654a22 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java @@ -151,7 +151,6 @@ public void collect(int doc) throws IOException { Page page = null; // emit only one page if (remainingDocs <= 0 && pagesEmitted == 0) { - pagesEmitted++; Block result = null; BooleanBlock seen = null; try { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index bbc3ace3716ba..2f72c309b5f21 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -68,6 +68,10 @@ public abstract class LuceneOperator extends SourceOperator { long processingNanos; int pagesEmitted; boolean doneCollecting; + /** + * Count of rows this operator has emitted. + */ + private long rowsEmitted; protected LuceneOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue) { this.blockFactory = blockFactory; @@ -115,7 +119,12 @@ public final int limit() { @Override public final Page getOutput() { try { - return getCheckedOutput(); + Page page = getCheckedOutput(); + if (page != null) { + pagesEmitted++; + rowsEmitted += page.getPositionCount(); + } + return page; } catch (IOException ioe) { throw new UncheckedIOException(ioe); } @@ -252,6 +261,7 @@ public static class Status implements Operator.Status { private final int sliceMin; private final int sliceMax; private final int current; + private final long rowsEmitted; private Status(LuceneOperator operator) { processedSlices = operator.processedSlices; @@ -276,6 +286,7 @@ private Status(LuceneOperator operator) { current = scorer.position; } pagesEmitted = operator.pagesEmitted; + rowsEmitted = operator.rowsEmitted; } Status( @@ -288,7 +299,8 @@ private Status(LuceneOperator operator) { int pagesEmitted, int sliceMin, int sliceMax, - int current + int current, + long rowsEmitted ) { this.processedSlices = processedSlices; this.processedQueries = processedQueries; @@ -300,6 +312,7 @@ private Status(LuceneOperator operator) { this.sliceMin = sliceMin; this.sliceMax = sliceMax; this.current = current; + this.rowsEmitted = rowsEmitted; } Status(StreamInput in) throws IOException { @@ -318,6 +331,11 @@ private Status(LuceneOperator operator) { sliceMin = in.readVInt(); sliceMax = in.readVInt(); current = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsEmitted = in.readVLong(); + } else { + rowsEmitted = 0; + } } @Override @@ -336,6 +354,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(sliceMin); out.writeVInt(sliceMax); out.writeVInt(current); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsEmitted); + } } @Override @@ -383,6 +404,10 @@ public int current() { return current; } + public long rowsEmitted() { + return rowsEmitted; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -399,6 +424,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("slice_min", sliceMin); builder.field("slice_max", sliceMax); builder.field("current", current); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -416,12 +442,13 @@ public boolean equals(Object o) { && pagesEmitted == status.pagesEmitted && sliceMin == status.sliceMin && sliceMax == status.sliceMax - && current == status.current; + && current == status.current + && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(processedSlices, sliceIndex, totalSlices, pagesEmitted, sliceMin, sliceMax, current); + return Objects.hash(processedSlices, sliceIndex, totalSlices, pagesEmitted, sliceMin, sliceMax, current, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 4afabcadf60cd..3d34067e1a839 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -175,7 +175,6 @@ public Page getCheckedOutput() throws IOException { } Page page = null; if (currentPagePos >= minPageSize || remainingDocs <= 0 || scorer.isDone()) { - pagesEmitted++; IntBlock shard = null; IntBlock leaf = null; IntVector docs = null; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index 8da62963ffb64..d25cb3a870da7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -240,7 +240,6 @@ private Page emit(boolean startEmitting) { Releasables.closeExpectNoException(shard, segments, docs, docBlock, scores); } } - pagesEmitted++; return page; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index 74affb10eaf20..8fbb946587470 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -546,8 +546,8 @@ public String toString() { } @Override - protected Status status(long processNanos, int pagesProcessed) { - return new Status(new TreeMap<>(readersBuilt), processNanos, pagesProcessed); + protected Status status(long processNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) { + return new Status(new TreeMap<>(readersBuilt), processNanos, pagesProcessed, rowsReceived, rowsEmitted); } public static class Status extends AbstractPageMappingOperator.Status { @@ -559,8 +559,8 @@ public static class Status extends AbstractPageMappingOperator.Status { private final Map readersBuilt; - Status(Map readersBuilt, long processNanos, int pagesProcessed) { - super(processNanos, pagesProcessed); + Status(Map readersBuilt, long processNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) { + super(processNanos, pagesProcessed, rowsReceived, rowsEmitted); this.readersBuilt = readersBuilt; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java index 05913b7dd5f69..09d04d36f8313 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java @@ -37,6 +37,14 @@ public abstract class AbstractPageMappingOperator implements Operator { * Count of pages that have been processed by this operator. */ private int pagesProcessed; + /** + * Count of rows this operator has received. + */ + private long rowsReceived; + /** + * Count of rows this operator has emitted. 
+ */ + private long rowsEmitted; protected abstract Page process(Page page); @@ -52,6 +60,7 @@ public final boolean needsInput() { public final void addInput(Page page) { assert prev == null : "has pending input page"; prev = page; + rowsReceived += page.getPositionCount(); } @Override @@ -75,6 +84,9 @@ public final Page getOutput() { long start = System.nanoTime(); Page p = process(prev); pagesProcessed++; + if (p != null) { + rowsEmitted += p.getPositionCount(); + } processNanos += System.nanoTime() - start; prev = null; return p; @@ -82,11 +94,11 @@ public final Page getOutput() { @Override public final Status status() { - return status(processNanos, pagesProcessed); + return status(processNanos, pagesProcessed, rowsReceived, rowsEmitted); } - protected Status status(long processNanos, int pagesProcessed) { - return new Status(processNanos, pagesProcessed); + protected Status status(long processNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) { + return new Status(processNanos, pagesProcessed, rowsReceived, rowsEmitted); } @Override @@ -105,15 +117,26 @@ public static class Status implements Operator.Status { private final long processNanos; private final int pagesProcessed; + private final long rowsReceived; + private final long rowsEmitted; - public Status(long processNanos, int pagesProcessed) { + public Status(long processNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) { this.processNanos = processNanos; this.pagesProcessed = pagesProcessed; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } protected Status(StreamInput in) throws IOException { processNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0; pagesProcessed = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + rowsEmitted = in.readVLong(); + } else { + rowsReceived = 0; + rowsEmitted = 0; + } } @Override @@ -122,6 +145,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(processNanos); } out.writeVInt(pagesProcessed); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override @@ -133,6 +160,14 @@ public int pagesProcessed() { return pagesProcessed; } + public long rowsReceived() { + return rowsReceived; + } + + public long rowsEmitted() { + return rowsEmitted; + } + public long processNanos() { return processNanos; } @@ -153,7 +188,7 @@ protected final XContentBuilder innerToXContent(XContentBuilder builder) throws if (builder.humanReadable()) { builder.field("process_time", TimeValue.timeValueNanos(processNanos)); } - return builder.field("pages_processed", pagesProcessed); + return builder.field("pages_processed", pagesProcessed).field("rows_received", rowsReceived).field("rows_emitted", rowsEmitted); } @Override @@ -161,12 +196,15 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return processNanos == status.processNanos && pagesProcessed == status.pagesProcessed; + return processNanos == status.processNanos + && pagesProcessed == status.pagesProcessed + && rowsReceived == status.rowsReceived + && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(processNanos, pagesProcessed); + return Objects.hash(processNanos, pagesProcessed, rowsReceived, rowsEmitted); } 
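Every Status class touched in this change applies the same wire-compatibility pattern: the new row counters are written only when the peer's transport version is at least ESQL_PROFILE_ROWS_PROCESSED, and default to zero when reading from an older node. A minimal standalone sketch of the pattern, using a hypothetical status class that is not part of this change:

    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    // Hypothetical: one pre-existing counter plus the two version-gated ones.
    final class RowCountingStatus implements Writeable {
        private final int pagesProcessed; // written by every version
        private final long rowsReceived;  // written only on new-enough versions
        private final long rowsEmitted;

        RowCountingStatus(StreamInput in) throws IOException {
            pagesProcessed = in.readVInt();
            if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) {
                rowsReceived = in.readVLong();
                rowsEmitted = in.readVLong();
            } else {
                // An older sender never wrote the counters, so report zero.
                rowsReceived = 0;
                rowsEmitted = 0;
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(pagesProcessed);
            if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) {
                out.writeVLong(rowsReceived);
                out.writeVLong(rowsEmitted);
            }
        }
    }

The read and write sides must gate on the same version constant; otherwise a mixed-version stream would desynchronize.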
@Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperator.java index 32492af157fe6..6a165fdfa055b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperator.java @@ -49,6 +49,16 @@ public abstract class AbstractPageMappingToIteratorOperator implements Operator */ private int pagesEmitted; + /** + * Count of rows this operator has received. + */ + private long rowsReceived; + + /** + * Count of rows this operator has emitted. + */ + private long rowsEmitted; + /** * Build and Iterator of results for a new page. */ @@ -82,6 +92,7 @@ public final void addInput(Page page) { } next = new RuntimeTrackingIterator(receive(page)); pagesReceived++; + rowsReceived += page.getPositionCount(); } @Override @@ -101,16 +112,23 @@ public final Page getOutput() { } Page ret = next.next(); pagesEmitted++; + rowsEmitted += ret.getPositionCount(); return ret; } @Override public final AbstractPageMappingToIteratorOperator.Status status() { - return status(processNanos, pagesReceived, pagesEmitted); + return status(processNanos, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } - protected AbstractPageMappingToIteratorOperator.Status status(long processNanos, int pagesReceived, int pagesEmitted) { - return new AbstractPageMappingToIteratorOperator.Status(processNanos, pagesReceived, pagesEmitted); + protected AbstractPageMappingToIteratorOperator.Status status( + long processNanos, + int pagesReceived, + int pagesEmitted, + long rowsReceived, + long rowsEmitted + ) { + return new AbstractPageMappingToIteratorOperator.Status(processNanos, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } @Override @@ -154,17 +172,28 @@ public static class Status implements Operator.Status { private final long processNanos; private final int pagesReceived; private final int pagesEmitted; + private final long rowsReceived; + private final long rowsEmitted; - public Status(long processNanos, int pagesProcessed, int pagesEmitted) { + public Status(long processNanos, int pagesProcessed, int pagesEmitted, long rowsReceived, long rowsEmitted) { this.processNanos = processNanos; this.pagesReceived = pagesProcessed; this.pagesEmitted = pagesEmitted; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } protected Status(StreamInput in) throws IOException { processNanos = in.readVLong(); pagesReceived = in.readVInt(); pagesEmitted = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + rowsEmitted = in.readVLong(); + } else { + rowsReceived = 0; + rowsEmitted = 0; + } } @Override @@ -172,6 +201,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(processNanos); out.writeVInt(pagesReceived); out.writeVInt(pagesEmitted); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override @@ -187,6 +220,14 @@ public int pagesEmitted() { return pagesEmitted; } + public long rowsReceived() { + return rowsReceived; + } + + public long rowsEmitted() { + return rowsEmitted; + } + public long processNanos() { return 
processNanos; } @@ -207,8 +248,10 @@ protected final XContentBuilder innerToXContent(XContentBuilder builder) throws if (builder.humanReadable()) { builder.field("process_time", TimeValue.timeValueNanos(processNanos)); } - builder.field("pages_received", pagesReceived); - return builder.field("pages_emitted", pagesEmitted); + return builder.field("pages_received", pagesReceived) + .field("pages_emitted", pagesEmitted) + .field("rows_received", rowsReceived) + .field("rows_emitted", rowsEmitted); } @Override @@ -216,12 +259,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; AbstractPageMappingToIteratorOperator.Status status = (AbstractPageMappingToIteratorOperator.Status) o; - return processNanos == status.processNanos && pagesReceived == status.pagesReceived && pagesEmitted == status.pagesEmitted; + return processNanos == status.processNanos + && pagesReceived == status.pagesReceived + && pagesEmitted == status.pagesEmitted + && rowsReceived == status.rowsReceived + && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(processNanos, pagesReceived, pagesEmitted); + return Objects.hash(processNanos, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java index f57f450c7ee39..ab086a7fbe480 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java @@ -58,6 +58,14 @@ public class AggregationOperator implements Operator { * Count of pages this operator has processed. */ private int pagesProcessed; + /** + * Count of rows this operator has received. + */ + private long rowsReceived; + /** + * Count of rows this operator has emitted. + */ + private long rowsEmitted; public record AggregationOperatorFactory(List aggregators, AggregatorMode mode) implements OperatorFactory { @@ -106,12 +114,16 @@ public void addInput(Page page) { page.releaseBlocks(); aggregationNanos += System.nanoTime() - start; pagesProcessed++; + rowsReceived += page.getPositionCount(); } } @Override public Page getOutput() { Page p = output; + if (p != null) { + rowsEmitted += p.getPositionCount(); + } this.output = null; return p; } @@ -181,7 +193,7 @@ public String toString() { @Override public Operator.Status status() { - return new Status(aggregationNanos, aggregationFinishNanos, pagesProcessed); + return new Status(aggregationNanos, aggregationFinishNanos, pagesProcessed, rowsReceived, rowsEmitted); } public static class Status implements Operator.Status { @@ -204,6 +216,14 @@ public static class Status implements Operator.Status { * Count of pages this operator has processed. */ private final int pagesProcessed; + /** + * Count of rows this operator has received. + */ + private final long rowsReceived; + /** + * Count of rows this operator has emitted. + */ + private final long rowsEmitted; /** * Build. @@ -211,10 +231,12 @@ public static class Status implements Operator.Status { * @param aggregationFinishNanos Nanoseconds this operator has spent running the aggregations. * @param pagesProcessed Count of pages this operator has processed. 
*/ - public Status(long aggregationNanos, long aggregationFinishNanos, int pagesProcessed) { + public Status(long aggregationNanos, long aggregationFinishNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) { this.aggregationNanos = aggregationNanos; this.aggregationFinishNanos = aggregationFinishNanos; this.pagesProcessed = pagesProcessed; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } protected Status(StreamInput in) throws IOException { @@ -225,6 +247,13 @@ protected Status(StreamInput in) throws IOException { aggregationFinishNanos = null; } pagesProcessed = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + rowsEmitted = in.readVLong(); + } else { + rowsReceived = 0; + rowsEmitted = 0; + } } @Override @@ -234,6 +263,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(aggregationFinishNanos); } out.writeVInt(pagesProcessed); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override @@ -262,6 +295,20 @@ public int pagesProcessed() { return pagesProcessed; } + /** + * Count of rows this operator has received. + */ + public long rowsReceived() { + return rowsReceived; + } + + /** + * Count of rows this operator has emitted. + */ + public long rowsEmitted() { + return rowsEmitted; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -277,6 +324,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws ); } builder.field("pages_processed", pagesProcessed); + builder.field("rows_received", rowsReceived); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -287,13 +336,15 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; return aggregationNanos == status.aggregationNanos + && Objects.equals(aggregationFinishNanos, status.aggregationFinishNanos) && pagesProcessed == status.pagesProcessed - && Objects.equals(aggregationFinishNanos, status.aggregationFinishNanos); + && rowsReceived == status.rowsReceived + && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(aggregationNanos, aggregationFinishNanos, pagesProcessed); + return Objects.hash(aggregationNanos, aggregationFinishNanos, pagesProcessed, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 06b890603e489..df522e931ca07 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -29,15 +29,18 @@ import java.util.concurrent.atomic.LongAdder; /** - * {@link AsyncOperator} performs an external computation specified in {@link #performAsync(Page, ActionListener)}. - * This operator acts as a client and operates on a per-page basis to reduce communication overhead. + * {@link AsyncOperator} performs an external computation specified in + * {@link #performAsync(Page, ActionListener)}. 
This operator acts as a client + to reduce communication overhead and fetches one {@code Fetched} result at a time. + It's the responsibility of subclasses to transform that {@code Fetched} into + output. * @see #performAsync(Page, ActionListener) */ -public abstract class AsyncOperator implements Operator { +public abstract class AsyncOperator<Fetched> implements Operator { private volatile SubscribableListener<Void> blockedFuture; - private final Map<Long, Page> buffers = ConcurrentCollections.newConcurrentMap(); + private final Map<Long, Fetched> buffers = ConcurrentCollections.newConcurrentMap(); private final FailureCollector failureCollector = new FailureCollector(); private final DriverContext driverContext; @@ -84,7 +87,7 @@ public void addInput(Page input) { driverContext.addAsyncAction(); boolean success = false; try { - final ActionListener<Page> listener = ActionListener.wrap(output -> { + final ActionListener<Fetched> listener = ActionListener.wrap(output -> { buffers.put(seqNo, output); onSeqNoCompleted(seqNo); }, e -> { @@ -105,18 +108,20 @@ public void addInput(Page input) { } } - private void releasePageOnAnyThread(Page page) { + protected static void releasePageOnAnyThread(Page page) { page.allowPassingToDifferentDriver(); page.releaseBlocks(); } + protected abstract void releaseFetchedOnAnyThread(Fetched result); + /** * Performs an external computation and notify the listener when the result is ready. * * @param inputPage the input page * @param listener the listener */ - protected abstract void performAsync(Page inputPage, ActionListener<Page> listener); + protected abstract void performAsync(Page inputPage, ActionListener<Fetched> listener); protected abstract void doClose(); @@ -126,7 +131,7 @@ private void onSeqNoCompleted(long seqNo) { notifyIfBlocked(); } if (closed || failureCollector.hasFailure()) { - discardPages(); + discardResults(); } } @@ -146,18 +151,18 @@ private void notifyIfBlocked() { private void checkFailure() { Exception e = failureCollector.getFailure(); if (e != null) { - discardPages(); + discardResults(); throw ExceptionsHelper.convertToRuntime(e); } } - private void discardPages() { + private void discardResults() { long nextCheckpoint; while ((nextCheckpoint = checkpoint.getPersistedCheckpoint() + 1) <= checkpoint.getProcessedCheckpoint()) { - Page page = buffers.remove(nextCheckpoint); + Fetched result = buffers.remove(nextCheckpoint); checkpoint.markSeqNoAsPersisted(nextCheckpoint); - if (page != null) { - releasePageOnAnyThread(page); + if (result != null) { + releaseFetchedOnAnyThread(result); } } } @@ -166,7 +171,7 @@ private void discardPages() { public final void close() { finish(); closed = true; - discardPages(); + discardResults(); doClose(); } @@ -185,15 +190,18 @@ public boolean isFinished() { } } - @Override - public Page getOutput() { + /** + * Get a {@code Fetched} result from the buffer. + * @return a result if one is ready or {@code null} if none are available.
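Under the new contract the base class only buffers {@code Fetched} results; subclasses decide how to turn them into output pages. A rough sketch of a minimal page-to-page subclass (hypothetical; the constructor arguments are assumed to be the DriverContext and the maximum number of outstanding requests):

    import org.elasticsearch.compute.data.Page;
    import org.elasticsearch.compute.operator.AsyncOperator;
    import org.elasticsearch.compute.operator.DriverContext;

    // Hypothetical subclass that buffers whole pages, as the pre-change operator did.
    abstract class PageFetchingOperator extends AsyncOperator<Page> {
        PageFetchingOperator(DriverContext driverContext, int maxOutstandingRequests) {
            super(driverContext, maxOutstandingRequests);
        }

        @Override
        public Page getOutput() {
            // getOutput() is no longer supplied by the base class; pull from the buffer.
            return fetchFromBuffer();
        }

        @Override
        protected void releaseFetchedOnAnyThread(Page page) {
            releasePageOnAnyThread(page); // now protected static for reuse by subclasses
        }
    }

performAsync(Page, ActionListener<Page>) is still left abstract, so a concrete implementation would also supply the actual remote call.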
+ */ + public final Fetched fetchFromBuffer() { checkFailure(); long persistedCheckpoint = checkpoint.getPersistedCheckpoint(); if (persistedCheckpoint < checkpoint.getProcessedCheckpoint()) { persistedCheckpoint++; - Page page = buffers.remove(persistedCheckpoint); + Fetched result = buffers.remove(persistedCheckpoint); checkpoint.markSeqNoAsPersisted(persistedCheckpoint); - return page; + return result; } else { return null; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java index acbf8a17b31fd..78572f55cd5eb 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -74,10 +75,9 @@ public class Driver implements Releasable, Describable { private final long statusNanos; private final AtomicReference cancelReason = new AtomicReference<>(); - private final AtomicReference> blocked = new AtomicReference<>(); - private final AtomicBoolean started = new AtomicBoolean(); private final SubscribableListener completionListener = new SubscribableListener<>(); + private final DriverScheduler scheduler = new DriverScheduler(); /** * Status reported to the tasks API. We write the status at most once every @@ -186,7 +186,13 @@ SubscribableListener run(TimeValue maxTime, int maxIterations, LongSupplie long nextStatus = startTime + statusNanos; int iter = 0; while (true) { - IsBlockedResult isBlocked = runSingleLoopIteration(); + IsBlockedResult isBlocked = Operator.NOT_BLOCKED; + try { + isBlocked = runSingleLoopIteration(); + } catch (DriverEarlyTerminationException unused) { + closeEarlyFinishedOperators(); + assert isFinished() : "not finished after early termination"; + } iter++; if (isBlocked.listener().isDone() == false) { updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC, isBlocked.reason()); @@ -242,7 +248,7 @@ public void abort(Exception reason, ActionListener listener) { } private IsBlockedResult runSingleLoopIteration() { - ensureNotCancelled(); + driverContext.checkForEarlyTermination(); boolean movedPage = false; for (int i = 0; i < activeOperators.size() - 1; i++) { @@ -255,6 +261,7 @@ private IsBlockedResult runSingleLoopIteration() { } if (op.isFinished() == false && nextOp.needsInput()) { + driverContext.checkForEarlyTermination(); Page page = op.getOutput(); if (page == null) { // No result, just move to the next iteration @@ -263,16 +270,37 @@ private IsBlockedResult runSingleLoopIteration() { page.releaseBlocks(); } else { // Non-empty result from the previous operation, move it to the next operation + try { + driverContext.checkForEarlyTermination(); + } catch (DriverEarlyTerminationException | TaskCancelledException e) { + page.releaseBlocks(); + throw e; + } nextOp.addInput(page); movedPage = true; } } if (op.isFinished()) { + driverContext.checkForEarlyTermination(); nextOp.finish(); } } + closeEarlyFinishedOperators(); + + if (movedPage == false) { + return oneOf( + activeOperators.stream() + 
.map(Operator::isBlocked) + .filter(laf -> laf.listener().isDone() == false) + .collect(Collectors.toList()) + ); + } + return Operator.NOT_BLOCKED; + } + + private void closeEarlyFinishedOperators() { for (int index = activeOperators.size() - 1; index >= 0; index--) { if (activeOperators.get(index).isFinished()) { /* @@ -298,37 +326,11 @@ private IsBlockedResult runSingleLoopIteration() { break; } } - - if (movedPage == false) { - return oneOf( - activeOperators.stream() - .map(Operator::isBlocked) - .filter(laf -> laf.listener().isDone() == false) - .collect(Collectors.toList()) - ); - } - return Operator.NOT_BLOCKED; } public void cancel(String reason) { if (cancelReason.compareAndSet(null, reason)) { - synchronized (this) { - SubscribableListener fut = this.blocked.get(); - if (fut != null) { - fut.onFailure(new TaskCancelledException(reason)); - } - } - } - } - - private boolean isCancelled() { - return cancelReason.get() != null; - } - - private void ensureNotCancelled() { - String reason = cancelReason.get(); - if (reason != null) { - throw new TaskCancelledException(reason); + scheduler.runPendingTasks(); } } @@ -342,10 +344,36 @@ public static void start( driver.completionListener.addListener(listener); if (driver.started.compareAndSet(false, true)) { driver.updateStatus(0, 0, DriverStatus.Status.STARTING, "driver starting"); + initializeEarlyTerminationChecker(driver); schedule(DEFAULT_TIME_BEFORE_YIELDING, maxIterations, threadContext, executor, driver, driver.completionListener); } } + private static void initializeEarlyTerminationChecker(Driver driver) { + // Register a listener to an exchange sink to handle early completion scenarios: + // 1. When the query accumulates sufficient data (e.g., reaching the LIMIT). + // 2. When users abort the query but want to retain the current result. + // This allows the Driver to finish early without waiting for the scheduled task. + final AtomicBoolean earlyFinished = new AtomicBoolean(); + driver.driverContext.initializeEarlyTerminationChecker(() -> { + final String reason = driver.cancelReason.get(); + if (reason != null) { + throw new TaskCancelledException(reason); + } + if (earlyFinished.get()) { + throw new DriverEarlyTerminationException("Exchange sink is closed"); + } + }); + if (driver.activeOperators.isEmpty() == false) { + if (driver.activeOperators.getLast() instanceof ExchangeSinkOperator sinkOperator) { + sinkOperator.addCompletionListener(ActionListener.running(() -> { + earlyFinished.set(true); + driver.scheduler.runPendingTasks(); + })); + } + } + } + // Drains all active operators and closes them. 
private void drainAndCloseOperators(@Nullable Exception e) { Iterator itr = activeOperators.iterator(); @@ -371,7 +399,7 @@ private static void schedule( Driver driver, ActionListener listener ) { - executor.execute(new AbstractRunnable() { + final var task = new AbstractRunnable() { @Override protected void doRun() { @@ -383,16 +411,12 @@ protected void doRun() { if (fut.isDone()) { schedule(maxTime, maxIterations, threadContext, executor, driver, listener); } else { - synchronized (driver) { - if (driver.isCancelled() == false) { - driver.blocked.set(fut); - } - } ActionListener readyListener = ActionListener.wrap( ignored -> schedule(maxTime, maxIterations, threadContext, executor, driver, listener), this::onFailure ); fut.addListener(ContextPreservingActionListener.wrapPreservingContext(readyListener, threadContext)); + driver.scheduler.addOrRunDelayedTask(() -> fut.onResponse(null)); } } @@ -405,7 +429,8 @@ public void onFailure(Exception e) { void onComplete(ActionListener listener) { driver.driverContext.waitForAsyncActions(ContextPreservingActionListener.wrapPreservingContext(listener, threadContext)); } - }); + }; + driver.scheduler.scheduleOrRunTask(executor, task); } private static IsBlockedResult oneOf(List results) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java index 843aa4aaaa881..1877f564677ba 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java @@ -60,6 +60,8 @@ public class DriverContext { private final WarningsMode warningsMode; + private Runnable earlyTerminationChecker = () -> {}; + public DriverContext(BigArrays bigArrays, BlockFactory blockFactory) { this(bigArrays, blockFactory, WarningsMode.COLLECT); } @@ -175,6 +177,21 @@ public void removeAsyncAction() { asyncActions.removeInstance(); } + /** + * Checks if the Driver associated with this DriverContext has been cancelled or early terminated. + */ + public void checkForEarlyTermination() { + earlyTerminationChecker.run(); + } + + /** + * Initializes the early termination or cancellation checker for this DriverContext. + * This method should be called when associating this DriverContext with a driver. + */ + public void initializeEarlyTerminationChecker(Runnable checker) { + this.earlyTerminationChecker = checker; + } + /** * Evaluators should use this function to decide their warning behavior. * @return an appropriate {@link WarningsMode} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverEarlyTerminationException.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverEarlyTerminationException.java new file mode 100644 index 0000000000000..6f79a6341df7d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverEarlyTerminationException.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.ElasticsearchException; + +/** + * An exception that indicates a computation should be terminated early because the downstream pipeline already has enough data or no longer requires more. + */ +public final class DriverEarlyTerminationException extends ElasticsearchException { + public DriverEarlyTerminationException(String message) { + super(message); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverScheduler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverScheduler.java new file mode 100644 index 0000000000000..05fe38007a929 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverScheduler.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.util.concurrent.EsExecutors; + +import java.util.List; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +/** + * A Driver can be put to sleep while its sink is full or its source is empty, or be rescheduled after running several iterations. + * This scheduler tracks the delayed and scheduled tasks, allowing them to run without waking up the driver or waiting for + * the thread pool to pick up the task. This enables fast cancellation or early finishing without discarding the current result. + */ +final class DriverScheduler { + private final AtomicReference<Runnable> delayedTask = new AtomicReference<>(); + private final AtomicReference<Runnable> scheduledTask = new AtomicReference<>(); + private final AtomicBoolean completing = new AtomicBoolean(); + + void addOrRunDelayedTask(Runnable task) { + delayedTask.set(task); + if (completing.get()) { + final Runnable toRun = delayedTask.getAndSet(null); + if (toRun != null) { + assert task == toRun; + toRun.run(); + } + } + } + + void scheduleOrRunTask(Executor executor, Runnable task) { + final Runnable existing = scheduledTask.getAndSet(task); + assert existing == null : existing; + final Executor executorToUse = completing.get() ? EsExecutors.DIRECT_EXECUTOR_SERVICE : executor; + executorToUse.execute(() -> { + final Runnable next = scheduledTask.getAndSet(null); + if (next != null) { + assert next == task; + next.run(); + } + }); + } + + void runPendingTasks() { + completing.set(true); + for (var taskHolder : List.of(delayedTask, scheduledTask)) { + final Runnable task = taskHolder.getAndSet(null); + if (task != null) { + task.run(); + } + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index ccddfdf5cc74a..c47b6cebdaddc 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -99,6 +99,14 @@ public String describe() { * Count of pages this operator has processed. */ private int pagesProcessed; + /** + * Count of rows this operator has received.
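Putting the early-termination pieces together: the driver installs a checker on its DriverContext at start, the run loop calls checkForEarlyTermination() at page boundaries, and the checker turns cancellation or a closed exchange sink into an exception that unwinds the current iteration without discarding results already delivered. A condensed sketch of the flow, simplified from the Driver changes above rather than the literal implementation:

    import org.elasticsearch.compute.operator.DriverContext;
    import org.elasticsearch.compute.operator.DriverEarlyTerminationException;
    import org.elasticsearch.tasks.TaskCancelledException;

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicReference;

    final class EarlyTerminationSketch {
        static void install(DriverContext context, AtomicReference<String> cancelReason, AtomicBoolean sinkFinished) {
            // Mirrors initializeEarlyTerminationChecker above: cancellation wins, then a closed sink.
            context.initializeEarlyTerminationChecker(() -> {
                String reason = cancelReason.get();
                if (reason != null) {
                    throw new TaskCancelledException(reason);
                }
                if (sinkFinished.get()) {
                    throw new DriverEarlyTerminationException("Exchange sink is closed");
                }
            });
        }

        static void iteration(DriverContext context) {
            try {
                context.checkForEarlyTermination(); // called between operators in the real loop
                // ... move pages from one operator to the next ...
            } catch (DriverEarlyTerminationException e) {
                // The real driver closes finished operators and completes normally,
                // keeping whatever pages already reached the sink.
            }
        }
    }

DriverEarlyTerminationException is deliberately distinct from TaskCancelledException: the former lets the driver finish successfully with the results it already has, while the latter propagates as a failure.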
+ */ + private long rowsReceived; + /** + * Count of rows this operator has emitted. + */ + private long rowsEmitted; @SuppressWarnings("this-escape") public HashAggregationOperator( @@ -187,12 +195,16 @@ public void close() { } finally { page.releaseBlocks(); pagesProcessed++; + rowsReceived += page.getPositionCount(); } } @Override public Page getOutput() { Page p = output; + if (p != null) { + rowsEmitted += p.getPositionCount(); + } output = null; return p; } @@ -246,7 +258,7 @@ public void close() { @Override public Operator.Status status() { - return new Status(hashNanos, aggregationNanos, pagesProcessed); + return new Status(hashNanos, aggregationNanos, pagesProcessed, rowsReceived, rowsEmitted); } protected static void checkState(boolean condition, String msg) { @@ -288,23 +300,43 @@ public static class Status implements Operator.Status { * Count of pages this operator has processed. */ private final int pagesProcessed; + /** + * Count of rows this operator has received. + */ + private final long rowsReceived; + /** + * Count of rows this operator has emitted. + */ + private final long rowsEmitted; /** * Build. * @param hashNanos Nanoseconds this operator has spent hashing grouping keys. * @param aggregationNanos Nanoseconds this operator has spent running the aggregations. * @param pagesProcessed Count of pages this operator has processed. + * @param rowsReceived Count of rows this operator has received. + * @param rowsEmitted Count of rows this operator has emitted. */ - public Status(long hashNanos, long aggregationNanos, int pagesProcessed) { + public Status(long hashNanos, long aggregationNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) { this.hashNanos = hashNanos; this.aggregationNanos = aggregationNanos; this.pagesProcessed = pagesProcessed; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } protected Status(StreamInput in) throws IOException { hashNanos = in.readVLong(); aggregationNanos = in.readVLong(); pagesProcessed = in.readVInt(); + + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + rowsEmitted = in.readVLong(); + } else { + rowsReceived = 0; + rowsEmitted = 0; + } } @Override @@ -312,6 +344,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(hashNanos); out.writeVLong(aggregationNanos); out.writeVInt(pagesProcessed); + + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override @@ -340,6 +377,20 @@ public int pagesProcessed() { return pagesProcessed; } + /** + * Count of rows this operator has received. + */ + public long rowsReceived() { + return rowsReceived; + } + + /** + * Count of rows this operator has emitted. 
+ */ + public long rowsEmitted() { + return rowsEmitted; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -352,6 +403,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("aggregation_time", TimeValue.timeValueNanos(aggregationNanos)); } builder.field("pages_processed", pagesProcessed); + builder.field("rows_received", rowsReceived); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -361,12 +414,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return hashNanos == status.hashNanos && aggregationNanos == status.aggregationNanos && pagesProcessed == status.pagesProcessed; + return hashNanos == status.hashNanos + && aggregationNanos == status.aggregationNanos + && pagesProcessed == status.pagesProcessed + && rowsReceived == status.rowsReceived + && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(hashNanos, aggregationNanos, pagesProcessed); + return Objects.hash(hashNanos, aggregationNanos, pagesProcessed, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java index 34e37031e6f11..b669be9192d06 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java @@ -37,6 +37,16 @@ public class LimitOperator implements Operator { */ private int pagesProcessed; + /** + * Count of rows this operator has received. + */ + private long rowsReceived; + + /** + * Count of rows this operator has emitted. + */ + private long rowsEmitted; + private Page lastInput; private boolean finished; @@ -67,6 +77,7 @@ public boolean needsInput() { public void addInput(Page page) { assert lastInput == null : "has pending input page"; lastInput = page; + rowsReceived += page.getPositionCount(); } @Override @@ -117,13 +128,14 @@ public Page getOutput() { } lastInput = null; pagesProcessed++; + rowsEmitted += result.getPositionCount(); return result; } @Override public Status status() { - return new Status(limit, limitRemaining, pagesProcessed); + return new Status(limit, limitRemaining, pagesProcessed, rowsReceived, rowsEmitted); } @Override @@ -160,16 +172,35 @@ public static class Status implements Operator.Status { */ private final int pagesProcessed; - protected Status(int limit, int limitRemaining, int pagesProcessed) { + /** + * Count of rows this operator has received. + */ + private final long rowsReceived; + + /** + * Count of rows this operator has emitted. 
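The net effect on the ES|QL profile output is that each operator status now reports row counts next to its page counts. For the LimitOperator status being built here, a serialized entry would look roughly like this (field order follows its toXContent; values invented for illustration):

    {
      "limit" : 10000,
      "limit_remaining" : 9000,
      "pages_processed" : 5,
      "rows_received" : 1000,
      "rows_emitted" : 1000
    }

For operators that can drop or expand rows (limit, mv_expand, aggregations), comparing rows_received with rows_emitted shows where data volume changes inside the pipeline.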
+ */ + private final long rowsEmitted; + + protected Status(int limit, int limitRemaining, int pagesProcessed, long rowsReceived, long rowsEmitted) { this.limit = limit; this.limitRemaining = limitRemaining; this.pagesProcessed = pagesProcessed; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } protected Status(StreamInput in) throws IOException { limit = in.readVInt(); limitRemaining = in.readVInt(); pagesProcessed = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + rowsEmitted = in.readVLong(); + } else { + rowsReceived = 0; + rowsEmitted = 0; + } } @Override @@ -177,6 +208,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(limit); out.writeVInt(limitRemaining); out.writeVInt(pagesProcessed); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override @@ -205,12 +240,28 @@ public int pagesProcessed() { return pagesProcessed; } + /** + * Count of rows this operator has received. + */ + public long rowsReceived() { + return rowsReceived; + } + + /** + * Count of rows this operator has emitted. + */ + public long rowsEmitted() { + return rowsEmitted; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("limit", limit); builder.field("limit_remaining", limitRemaining); builder.field("pages_processed", pagesProcessed); + builder.field("rows_received", rowsReceived); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -219,12 +270,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return limit == status.limit && limitRemaining == status.limitRemaining && pagesProcessed == status.pagesProcessed; + return limit == status.limit + && limitRemaining == status.limitRemaining + && pagesProcessed == status.pagesProcessed + && rowsReceived == status.rowsReceived + && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(limit, limitRemaining, pagesProcessed); + return Objects.hash(limit, limitRemaining, pagesProcessed, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java index e87329a907054..1659a88a84cda 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java @@ -69,10 +69,21 @@ public String describe() { private int nextItemOnExpanded = 0; /** - * Count of pages that have been processed by this operator. + * Count of pages that this operator has received. */ - private int pagesIn; - private int pagesOut; + private int pagesReceived; + /** + * Count of pages this operator has emitted. + */ + private int pagesEmitted; + /** + * Count of rows this operator has received. + */ + private long rowsReceived; + /** + * Count of rows this operator has emitted. 
+ */ + private long rowsEmitted; public MvExpandOperator(int channel, int pageSize) { this.channel = channel; @@ -82,10 +93,18 @@ public MvExpandOperator(int channel, int pageSize) { @Override public final Page getOutput() { + Page result = getOutputInternal(); + if (result != null) { + pagesEmitted++; + rowsEmitted += result.getPositionCount(); + } + return result; + } + + private Page getOutputInternal() { if (prev == null) { return null; } - pagesOut++; if (expandedBlock == null) { /* @@ -214,7 +233,8 @@ public final void addInput(Page page) { assert prev == null : "has pending input page"; prev = page; this.expandingBlock = prev.getBlock(channel); - pagesIn++; + pagesReceived++; + rowsReceived += page.getPositionCount(); } @Override @@ -229,7 +249,7 @@ public final boolean isFinished() { @Override public final Status status() { - return new Status(pagesIn, pagesOut, noops); + return new Status(pagesReceived, pagesEmitted, noops, rowsReceived, rowsEmitted); } @Override @@ -248,9 +268,11 @@ public String toString() { public static final class Status implements Operator.Status { - private final int pagesIn; - private final int pagesOut; + private final int pagesReceived; + private final int pagesEmitted; private final int noops; + private final long rowsReceived; + private final long rowsEmitted; public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Operator.Status.class, @@ -258,31 +280,46 @@ public static final class Status implements Operator.Status { Status::new ); - Status(int pagesIn, int pagesOut, int noops) { - this.pagesIn = pagesIn; - this.pagesOut = pagesOut; + Status(int pagesReceived, int pagesEmitted, int noops, long rowsReceived, long rowsEmitted) { + this.pagesReceived = pagesReceived; + this.pagesEmitted = pagesEmitted; this.noops = noops; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } Status(StreamInput in) throws IOException { - pagesIn = in.readVInt(); - pagesOut = in.readVInt(); + pagesReceived = in.readVInt(); + pagesEmitted = in.readVInt(); noops = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + rowsEmitted = in.readVLong(); + } else { + rowsReceived = 0; + rowsEmitted = 0; + } } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(pagesIn); - out.writeVInt(pagesOut); + out.writeVInt(pagesReceived); + out.writeVInt(pagesEmitted); out.writeVInt(noops); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("pages_in", pagesIn); - builder.field("pages_out", pagesOut); + builder.field("pages_received", pagesReceived); + builder.field("pages_emitted", pagesEmitted); builder.field("noops", noops); + builder.field("rows_received", rowsReceived); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -304,20 +341,32 @@ public boolean equals(Object o) { return false; } Status status = (Status) o; - return noops == status.noops && pagesIn == status.pagesIn && pagesOut == status.pagesOut; + return noops == status.noops + && pagesReceived == status.pagesReceived + && pagesEmitted == status.pagesEmitted + && rowsReceived == status.rowsReceived + && rowsEmitted == status.rowsEmitted; + } + + public int pagesReceived() { + 
return pagesReceived; + } + + public int pagesEmitted() { + return pagesEmitted; } - public int pagesIn() { - return pagesIn; + public long rowsReceived() { + return rowsReceived; } - public int pagesOut() { - return pagesOut; + public long rowsEmitted() { + return rowsEmitted; } @Override public int hashCode() { - return Objects.hash(noops, pagesIn, pagesOut); + return Objects.hash(noops, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java index e96ca9e39b7e5..ee939c9a07e0b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.operator.exchange; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.IsBlockedResult; @@ -30,6 +31,11 @@ public interface ExchangeSink { */ boolean isFinished(); + /** + * Adds a listener that will be notified when this exchange sink is finished. + */ + void addCompletionListener(ActionListener listener); + /** * Whether the sink is blocked on adding more pages */ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index 614c3fe0ecc5c..21eb2ed565618 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -52,9 +52,11 @@ public ExchangeSinkHandler(BlockFactory blockFactory, int maxBufferSize, LongSup private class ExchangeSinkImpl implements ExchangeSink { boolean finished; + private final SubscribableListener onFinished = new SubscribableListener<>(); ExchangeSinkImpl() { onChanged(); + buffer.addCompletionListener(onFinished); outstandingSinks.incrementAndGet(); } @@ -68,6 +70,7 @@ public void addPage(Page page) { public void finish() { if (finished == false) { finished = true; + onFinished.onResponse(null); onChanged(); if (outstandingSinks.decrementAndGet() == 0) { buffer.finish(false); @@ -78,7 +81,12 @@ public void finish() { @Override public boolean isFinished() { - return finished || buffer.isFinished(); + return onFinished.isDone(); + } + + @Override + public void addCompletionListener(ActionListener listener) { + onFinished.addListener(listener); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java index dd89dfe480c36..f87edd1a3e169 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java @@ -9,6 +9,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -32,7 +33,8 @@ public class ExchangeSinkOperator extends SinkOperator { private final ExchangeSink sink; private final Function transformer; - private int pagesAccepted; + private int pagesReceived; + private long rowsReceived; public record ExchangeSinkOperatorFactory(Supplier exchangeSinks, Function transformer) implements @@ -59,6 +61,10 @@ public boolean isFinished() { return sink.isFinished(); } + public void addCompletionListener(ActionListener listener) { + sink.addCompletionListener(listener); + } + @Override public void finish() { sink.finish(); @@ -76,7 +82,8 @@ public boolean needsInput() { @Override protected void doAddInput(Page page) { - pagesAccepted++; + pagesReceived++; + rowsReceived += page.getPositionCount(); sink.addPage(transformer.apply(page)); } @@ -92,7 +99,7 @@ public String toString() { @Override public Status status() { - return new Status(pagesAccepted); + return new Status(pagesReceived, rowsReceived); } public static class Status implements Operator.Status { @@ -102,19 +109,31 @@ public static class Status implements Operator.Status { Status::new ); - private final int pagesAccepted; + private final int pagesReceived; + private final long rowsReceived; - Status(int pagesAccepted) { - this.pagesAccepted = pagesAccepted; + Status(int pagesReceived, long rowsReceived) { + this.pagesReceived = pagesReceived; + this.rowsReceived = rowsReceived; } Status(StreamInput in) throws IOException { - pagesAccepted = in.readVInt(); + pagesReceived = in.readVInt(); + + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsReceived = in.readVLong(); + } else { + rowsReceived = 0; + } } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(pagesAccepted); + out.writeVInt(pagesReceived); + + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsReceived); + } } @Override @@ -122,14 +141,19 @@ public String getWriteableName() { return ENTRY.name; } - public int pagesAccepted() { - return pagesAccepted; + public int pagesReceived() { + return pagesReceived; + } + + public long rowsReceived() { + return rowsReceived; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("pages_accepted", pagesAccepted); + builder.field("pages_received", pagesReceived); + builder.field("rows_received", rowsReceived); return builder.endObject(); } @@ -138,12 +162,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return pagesAccepted == status.pagesAccepted; + return pagesReceived == status.pagesReceived && rowsReceived == status.rowsReceived; } @Override public int hashCode() { - return Objects.hash(pagesAccepted); + return Objects.hash(pagesReceived, rowsReceived); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java index 2d0ce228e81df..3a96f1bb1d368 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java @@ -32,6 +32,7 @@ public class ExchangeSourceOperator extends SourceOperator { private final ExchangeSource source; private IsBlockedResult isBlocked = NOT_BLOCKED; private int pagesEmitted; + private long rowsEmitted; public record ExchangeSourceOperatorFactory(Supplier exchangeSources) implements SourceOperatorFactory { @@ -55,6 +56,7 @@ public Page getOutput() { final var page = source.pollPage(); if (page != null) { pagesEmitted++; + rowsEmitted += page.getPositionCount(); } return page; } @@ -92,7 +94,7 @@ public String toString() { @Override public Status status() { - return new Status(source.bufferSize(), pagesEmitted); + return new Status(source.bufferSize(), pagesEmitted, rowsEmitted); } public static class Status implements Operator.Status { @@ -104,21 +106,33 @@ public static class Status implements Operator.Status { private final int pagesWaiting; private final int pagesEmitted; + private final long rowsEmitted; - Status(int pagesWaiting, int pagesEmitted) { + Status(int pagesWaiting, int pagesEmitted, long rowsEmitted) { this.pagesWaiting = pagesWaiting; this.pagesEmitted = pagesEmitted; + this.rowsEmitted = rowsEmitted; } Status(StreamInput in) throws IOException { pagesWaiting = in.readVInt(); pagesEmitted = in.readVInt(); + + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + rowsEmitted = in.readVLong(); + } else { + rowsEmitted = 0; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(pagesWaiting); out.writeVInt(pagesEmitted); + + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVLong(rowsEmitted); + } } @Override @@ -134,11 +148,16 @@ public int pagesEmitted() { return pagesEmitted; } + public long rowsEmitted() { + return rowsEmitted; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("pages_waiting", pagesWaiting); builder.field("pages_emitted", pagesEmitted); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -147,12 +166,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return pagesWaiting == status.pagesWaiting && pagesEmitted == status.pagesEmitted; + return pagesWaiting == status.pagesWaiting && pagesEmitted == status.pagesEmitted && rowsEmitted == status.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(pagesWaiting, pagesEmitted); + return Objects.hash(pagesWaiting, pagesEmitted, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/RightChunkedLeftJoin.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/RightChunkedLeftJoin.java index f9895ff346b5c..2e2a0d383e6b4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/RightChunkedLeftJoin.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/RightChunkedLeftJoin.java @@ -128,7 +128,7 @@ * | l99 | null | null | * } */ -class RightChunkedLeftJoin implements Releasable { +public class RightChunkedLeftJoin implements Releasable { private final Page leftHand; private final int mergedElementCount; /** @@ -138,12 +138,12 @@ class 
RightChunkedLeftJoin implements Releasable { */ private int next = 0; - RightChunkedLeftJoin(Page leftHand, int mergedElementCounts) { + public RightChunkedLeftJoin(Page leftHand, int mergedElementCounts) { this.leftHand = leftHand; this.mergedElementCount = mergedElementCounts; } - Page join(Page rightHand) { + public Page join(Page rightHand) { IntVector positions = rightHand.getBlock(0).asVector(); if (positions.getInt(0) < next - 1) { throw new IllegalArgumentException("maximum overlap is one position"); @@ -209,7 +209,7 @@ Page join(Page rightHand) { } } - Optional noMoreRightHandPages() { + public Optional noMoreRightHandPages() { if (next == leftHand.getPositionCount()) { return Optional.empty(); } @@ -237,6 +237,14 @@ Optional noMoreRightHandPages() { } } + /** + * Release this on any thread, rather than just the thread that built it. + */ + public void releaseOnAnyThread() { + leftHand.allowPassingToDifferentDriver(); + leftHand.releaseBlocks(); + } + @Override public void close() { Releasables.close(leftHand::releaseBlocks); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperator.java index 682a5d6050c22..0489be58fade7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperator.java @@ -278,6 +278,26 @@ public String describe() { private Iterator output; + /** + * Count of pages that have been received by this operator. + */ + private int pagesReceived; + + /** + * Count of pages that have been emitted by this operator. + */ + private int pagesEmitted; + + /** + * Count of rows this operator has received. + */ + private long rowsReceived; + + /** + * Count of rows this operator has emitted. 
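+ * TopN buffers all input before producing output, so this stays at zero until the operator finishes and builds its output pages.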
+ */ + private long rowsEmitted; + public TopNOperator( BlockFactory blockFactory, CircuitBreaker breaker, @@ -368,7 +388,9 @@ public void addInput(Page page) { spare = inputQueue.insertWithOverflow(spare); } } finally { - Releasables.close(() -> page.releaseBlocks()); + page.releaseBlocks(); + pagesReceived++; + rowsReceived += page.getPositionCount(); } } @@ -491,10 +513,13 @@ public boolean isFinished() { @Override public Page getOutput() { - if (output != null && output.hasNext()) { - return output.next(); + if (output == null || output.hasNext() == false) { + return null; } - return null; + Page ret = output.next(); + pagesEmitted++; + rowsEmitted += ret.getPositionCount(); + return ret; } @Override @@ -531,7 +556,7 @@ public long ramBytesUsed() { @Override public Status status() { - return new TopNOperatorStatus(inputQueue.size(), ramBytesUsed()); + return new TopNOperatorStatus(inputQueue.size(), ramBytesUsed(), pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java index 1617a546be2cc..ceccdce529ce8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java @@ -27,21 +27,55 @@ public class TopNOperatorStatus implements Operator.Status { ); private final int occupiedRows; private final long ramBytesUsed; + private final int pagesReceived; + private final int pagesEmitted; + private final long rowsReceived; + private final long rowsEmitted; - public TopNOperatorStatus(int occupiedRows, long ramBytesUsed) { + public TopNOperatorStatus( + int occupiedRows, + long ramBytesUsed, + int pagesReceived, + int pagesEmitted, + long rowsReceived, + long rowsEmitted + ) { this.occupiedRows = occupiedRows; this.ramBytesUsed = ramBytesUsed; + this.pagesReceived = pagesReceived; + this.pagesEmitted = pagesEmitted; + this.rowsReceived = rowsReceived; + this.rowsEmitted = rowsEmitted; } TopNOperatorStatus(StreamInput in) throws IOException { this.occupiedRows = in.readVInt(); this.ramBytesUsed = in.readVLong(); + + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + this.pagesReceived = in.readVInt(); + this.pagesEmitted = in.readVInt(); + this.rowsReceived = in.readVLong(); + this.rowsEmitted = in.readVLong(); + } else { + this.pagesReceived = 0; + this.pagesEmitted = 0; + this.rowsReceived = 0; + this.rowsEmitted = 0; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(occupiedRows); out.writeVLong(ramBytesUsed); + + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) { + out.writeVInt(pagesReceived); + out.writeVInt(pagesEmitted); + out.writeVLong(rowsReceived); + out.writeVLong(rowsEmitted); + } } @Override @@ -57,12 +91,32 @@ public long ramBytesUsed() { return ramBytesUsed; } + public int pagesReceived() { + return pagesReceived; + } + + public int pagesEmitted() { + return pagesEmitted; + } + + public long rowsReceived() { + return rowsReceived; + } + + public long rowsEmitted() { + return rowsEmitted; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("occupied_rows", occupiedRows); 
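+ // The raw ram_bytes_used value is followed by a human-readable rendering in the "ram_used" field below.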
builder.field("ram_bytes_used", ramBytesUsed); builder.field("ram_used", ByteSizeValue.ofBytes(ramBytesUsed)); + builder.field("pages_received", pagesReceived); + builder.field("pages_emitted", pagesEmitted); + builder.field("rows_received", rowsReceived); + builder.field("rows_emitted", rowsEmitted); return builder.endObject(); } @@ -72,12 +126,17 @@ public boolean equals(Object o) { return false; } TopNOperatorStatus that = (TopNOperatorStatus) o; - return occupiedRows == that.occupiedRows && ramBytesUsed == that.ramBytesUsed; + return occupiedRows == that.occupiedRows + && ramBytesUsed == that.ramBytesUsed + && pagesReceived == that.pagesReceived + && pagesEmitted == that.pagesEmitted + && rowsReceived == that.rowsReceived + && rowsEmitted == that.rowsEmitted; } @Override public int hashCode() { - return Objects.hash(occupiedRows, ramBytesUsed); + return Objects.hash(occupiedRows, ramBytesUsed, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java index def0710644d22..28aa9e7976c79 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java @@ -20,7 +20,19 @@ public class LuceneSourceOperatorStatusTests extends AbstractWireSerializingTestCase { public static LuceneSourceOperator.Status simple() { - return new LuceneSourceOperator.Status(2, Set.of("*:*"), new TreeSet<>(List.of("a:0", "a:1")), 1002, 0, 1, 5, 123, 99990, 8000); + return new LuceneSourceOperator.Status( + 2, + Set.of("*:*"), + new TreeSet<>(List.of("a:0", "a:1")), + 1002, + 0, + 1, + 5, + 123, + 99990, + 8000, + 222 + ); } public static String simpleToJson() { @@ -41,7 +53,8 @@ public static String simpleToJson() { "pages_emitted" : 5, "slice_min" : 123, "slice_max" : 99990, - "current" : 8000 + "current" : 8000, + "rows_emitted" : 222 }"""; } @@ -66,7 +79,8 @@ public LuceneSourceOperator.Status createTestInstance() { randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeInt(), - randomNonNegativeInt() + randomNonNegativeInt(), + randomNonNegativeLong() ); } @@ -100,7 +114,8 @@ protected LuceneSourceOperator.Status mutateInstance(LuceneSourceOperator.Status int sliceMin = instance.sliceMin(); int sliceMax = instance.sliceMax(); int current = instance.current(); - switch (between(0, 9)) { + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 10)) { case 0 -> processedSlices = randomValueOtherThan(processedSlices, ESTestCase::randomNonNegativeInt); case 1 -> processedQueries = randomValueOtherThan(processedQueries, LuceneSourceOperatorStatusTests::randomProcessedQueries); case 2 -> processedShards = randomValueOtherThan(processedShards, LuceneSourceOperatorStatusTests::randomProcessedShards); @@ -111,6 +126,7 @@ protected LuceneSourceOperator.Status mutateInstance(LuceneSourceOperator.Status case 7 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); case 8 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); case 9 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); + case 10 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); default -> throw new 
UnsupportedOperationException(); } return new LuceneSourceOperator.Status( @@ -123,7 +139,8 @@ protected LuceneSourceOperator.Status mutateInstance(LuceneSourceOperator.Status pagesEmitted, sliceMin, sliceMax, - current + current, + rowsEmitted ); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java index 5887da0bc466b..4303137f74bb3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java @@ -20,7 +20,7 @@ public class ValuesSourceReaderOperatorStatusTests extends AbstractWireSerializingTestCase { public static ValuesSourceReaderOperator.Status simple() { - return new ValuesSourceReaderOperator.Status(Map.of("ReaderType", 3), 1022323, 123); + return new ValuesSourceReaderOperator.Status(Map.of("ReaderType", 3), 1022323, 123, 111, 222); } public static String simpleToJson() { @@ -31,7 +31,9 @@ public static String simpleToJson() { }, "process_nanos" : 1022323, "process_time" : "1ms", - "pages_processed" : 123 + "pages_processed" : 123, + "rows_received" : 111, + "rows_emitted" : 222 }"""; } @@ -46,7 +48,13 @@ protected Writeable.Reader instanceReader() { @Override public ValuesSourceReaderOperator.Status createTestInstance() { - return new ValuesSourceReaderOperator.Status(randomReadersBuilt(), randomNonNegativeLong(), randomNonNegativeInt()); + return new ValuesSourceReaderOperator.Status( + randomReadersBuilt(), + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } private Map randomReadersBuilt() { @@ -63,12 +71,16 @@ protected ValuesSourceReaderOperator.Status mutateInstance(ValuesSourceReaderOpe Map readersBuilt = instance.readersBuilt(); long processNanos = instance.processNanos(); int pagesProcessed = instance.pagesProcessed(); - switch (between(0, 2)) { + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 4)) { case 0 -> readersBuilt = randomValueOtherThan(readersBuilt, this::randomReadersBuilt); case 1 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); case 2 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + case 3 -> rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + case 4 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); default -> throw new UnsupportedOperationException(); } - return new ValuesSourceReaderOperator.Status(readersBuilt, processNanos, pagesProcessed); + return new ValuesSourceReaderOperator.Status(readersBuilt, processNanos, pagesProcessed, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java index 3c04e6e5a9f57..3e8aaf4f6b0bd 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java @@ -16,7 +16,7 @@ public class AbstractPageMappingOperatorStatusTests extends AbstractWireSerializingTestCase { public static AbstractPageMappingOperator.Status simple() { - return new AbstractPageMappingOperator.Status(200012, 123); + return new AbstractPageMappingOperator.Status(200012, 123, 111, 222); } public static String simpleToJson() { @@ -24,7 +24,9 @@ public static String simpleToJson() { { "process_nanos" : 200012, "process_time" : "200micros", - "pages_processed" : 123 + "pages_processed" : 123, + "rows_received" : 111, + "rows_emitted" : 222 }"""; } @@ -39,18 +41,27 @@ protected Writeable.Reader instanceReader() @Override public AbstractPageMappingOperator.Status createTestInstance() { - return new AbstractPageMappingOperator.Status(randomNonNegativeLong(), randomNonNegativeInt()); + return new AbstractPageMappingOperator.Status( + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override protected AbstractPageMappingOperator.Status mutateInstance(AbstractPageMappingOperator.Status instance) { long processNanos = instance.processNanos(); int pagesProcessed = instance.pagesProcessed(); - switch (between(0, 1)) { + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 3)) { case 0 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); case 1 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + case 2 -> rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + case 3 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); default -> throw new UnsupportedOperationException(); } - return new AbstractPageMappingOperator.Status(processNanos, pagesProcessed); + return new AbstractPageMappingOperator.Status(processNanos, pagesProcessed, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperatorStatusTests.java index 41db82b9b4c8c..b131c43ad6481 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingToIteratorOperatorStatusTests.java @@ -17,7 +17,7 @@ public class AbstractPageMappingToIteratorOperatorStatusTests extends AbstractWireSerializingTestCase< AbstractPageMappingToIteratorOperator.Status> { public static AbstractPageMappingToIteratorOperator.Status simple() { - return new AbstractPageMappingToIteratorOperator.Status(200012, 123, 204); + return new AbstractPageMappingToIteratorOperator.Status(200012, 123, 204, 111, 222); } public static String simpleToJson() { @@ -26,7 +26,9 @@ public static String simpleToJson() { "process_nanos" : 200012, "process_time" : "200micros", "pages_received" : 123, - "pages_emitted" : 204 + "pages_emitted" : 204, + "rows_received" : 111, + "rows_emitted" : 222 }"""; } @@ -41,7 +43,13 @@ protected Writeable.Reader instanc @Override public AbstractPageMappingToIteratorOperator.Status createTestInstance() { - return new 
AbstractPageMappingToIteratorOperator.Status(randomNonNegativeLong(), randomNonNegativeInt(), randomNonNegativeInt()); + return new AbstractPageMappingToIteratorOperator.Status( + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override @@ -49,12 +57,16 @@ protected AbstractPageMappingToIteratorOperator.Status mutateInstance(AbstractPa long processNanos = instance.processNanos(); int pagesReceived = instance.pagesReceived(); int pagesEmitted = instance.pagesEmitted(); - switch (between(0, 2)) { + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 4)) { case 0 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); case 1 -> pagesReceived = randomValueOtherThan(pagesReceived, ESTestCase::randomNonNegativeInt); case 2 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); + case 3 -> rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + case 4 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); default -> throw new UnsupportedOperationException(); } - return new AbstractPageMappingToIteratorOperator.Status(processNanos, pagesReceived, pagesEmitted); + return new AbstractPageMappingToIteratorOperator.Status(processNanos, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java index f9d806b72cb46..ba6c3ea0c153a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java @@ -16,7 +16,7 @@ public class AggregationOperatorStatusTests extends AbstractWireSerializingTestCase { public static AggregationOperator.Status simple() { - return new AggregationOperator.Status(200012, 400036, 123); + return new AggregationOperator.Status(200012, 400036, 123, 111, 222); } public static String simpleToJson() { @@ -26,7 +26,9 @@ public static String simpleToJson() { "aggregation_time" : "200micros", "aggregation_finish_nanos" : 400036, "aggregation_finish_time" : "400micros", - "pages_processed" : 123 + "pages_processed" : 123, + "rows_received" : 111, + "rows_emitted" : 222 }"""; } @@ -41,7 +43,13 @@ protected Writeable.Reader instanceReader() { @Override public AggregationOperator.Status createTestInstance() { - return new AggregationOperator.Status(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeInt()); + return new AggregationOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override @@ -49,12 +57,16 @@ protected AggregationOperator.Status mutateInstance(AggregationOperator.Status i long aggregationNanos = instance.aggregationNanos(); long aggregationFinishNanos = instance.aggregationFinishNanos(); int pagesProcessed = instance.pagesProcessed(); - switch (between(0, 2)) { + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 4)) { case 0 -> aggregationNanos = randomValueOtherThan(aggregationNanos, 
ESTestCase::randomNonNegativeLong); case 1 -> aggregationFinishNanos = randomValueOtherThan(aggregationFinishNanos, ESTestCase::randomNonNegativeLong); case 2 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + case 3 -> rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + case 4 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); default -> throw new UnsupportedOperationException(); } - return new AggregationOperator.Status(aggregationNanos, aggregationFinishNanos, pagesProcessed); + return new AggregationOperator.Status(aggregationNanos, aggregationFinishNanos, pagesProcessed, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java index ba3e7d816e42c..3d4c8b8ed226e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java @@ -8,10 +8,20 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; import org.hamcrest.Matcher; +import java.io.IOException; + +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.matchesPattern; /** @@ -74,6 +84,38 @@ public final void testSimpleToString() { } } + /** + * Ensures that the Operator.Status of this operator has the standard fields. + */ + public void testOperatorStatus() throws IOException { + DriverContext driverContext = driverContext(); + try (var operator = simple().get(driverContext)) { + Operator.Status status = operator.status(); + + assumeTrue("Operator does not provide a status", status != null); + + var xContent = XContentType.JSON.xContent(); + try (var xContentBuilder = XContentBuilder.builder(xContent)) { + status.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + + var bytesReference = BytesReference.bytes(xContentBuilder); + var map = XContentHelper.convertToMap(bytesReference, false, xContentBuilder.contentType()).v2(); + + if (operator instanceof SourceOperator) { + assertThat(map, hasKey("pages_emitted")); + assertThat(map, hasKey("rows_emitted")); + } else if (operator instanceof SinkOperator) { + assertThat(map, hasKey("pages_received")); + assertThat(map, hasKey("rows_received")); + } else { + assertThat(map, either(hasKey("pages_processed")).or(both(hasKey("pages_received")).and(hasKey("pages_emitted")))); + assertThat(map, hasKey("rows_received")); + assertThat(map, hasKey("rows_emitted")); + } + } + } + } + /** * A {@link DriverContext} with a nonBreakingBigArrays. 
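+ * Its big arrays are not tracked by a circuit breaker.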
*/ diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index fbcf11cd948c0..38f25244cd917 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -110,7 +110,7 @@ protected Page createPage(int positionOffset, int length) { } }; int maxConcurrentRequests = randomIntBetween(1, 10); - AsyncOperator asyncOperator = new AsyncOperator(driverContext, maxConcurrentRequests) { + AsyncOperator asyncOperator = new AsyncOperator(driverContext, maxConcurrentRequests) { final LookupService lookupService = new LookupService(threadPool, globalBlockFactory, dict, maxConcurrentRequests); @Override @@ -118,6 +118,16 @@ protected void performAsync(Page inputPage, ActionListener listener) { lookupService.lookupAsync(inputPage, listener); } + @Override + public Page getOutput() { + return fetchFromBuffer(); + } + + @Override + protected void releaseFetchedOnAnyThread(Page page) { + releasePageOnAnyThread(page); + } + @Override public void doClose() { @@ -159,7 +169,7 @@ public void doClose() { Releasables.close(localBreaker); } - class TestOp extends AsyncOperator { + class TestOp extends AsyncOperator { Map> handlers = new HashMap<>(); TestOp(DriverContext driverContext, int maxOutstandingRequests) { @@ -171,6 +181,16 @@ protected void performAsync(Page inputPage, ActionListener listener) { handlers.put(inputPage, listener); } + @Override + public Page getOutput() { + return fetchFromBuffer(); + } + + @Override + protected void releaseFetchedOnAnyThread(Page page) { + releasePageOnAnyThread(page); + } + @Override protected void doClose() { @@ -233,7 +253,7 @@ public void testFailure() throws Exception { ); int maxConcurrentRequests = randomIntBetween(1, 10); AtomicBoolean failed = new AtomicBoolean(); - AsyncOperator asyncOperator = new AsyncOperator(driverContext, maxConcurrentRequests) { + AsyncOperator asyncOperator = new AsyncOperator(driverContext, maxConcurrentRequests) { @Override protected void performAsync(Page inputPage, ActionListener listener) { ActionRunnable command = new ActionRunnable<>(listener) { @@ -256,6 +276,16 @@ protected void doRun() { } } + @Override + public Page getOutput() { + return fetchFromBuffer(); + } + + @Override + protected void releaseFetchedOnAnyThread(Page page) { + releasePageOnAnyThread(page); + } + @Override protected void doClose() { @@ -285,7 +315,7 @@ public void testIsFinished() { for (int i = 0; i < iters; i++) { DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); CyclicBarrier barrier = new CyclicBarrier(2); - AsyncOperator asyncOperator = new AsyncOperator(driverContext, between(1, 10)) { + AsyncOperator asyncOperator = new AsyncOperator(driverContext, between(1, 10)) { @Override protected void performAsync(Page inputPage, ActionListener listener) { ActionRunnable command = new ActionRunnable<>(listener) { @@ -302,6 +332,16 @@ protected void doRun() { threadPool.executor(ESQL_TEST_EXECUTOR).execute(command); } + @Override + public Page getOutput() { + return fetchFromBuffer(); + } + + @Override + protected void releaseFetchedOnAnyThread(Page page) { + releasePageOnAnyThread(page); + } + @Override protected void doClose() { diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java index 694aaba4bd85e..b067c44a289b4 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -18,9 +19,14 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.compute.data.BasicBlockTests; +import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; +import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; +import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler; +import org.elasticsearch.compute.operator.exchange.ExchangeSourceOperator; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; @@ -35,8 +41,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.function.LongSupplier; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; public class DriverTests extends ESTestCase { @@ -273,6 +282,76 @@ public Page getOutput() { } } + public void testEarlyTermination() { + DriverContext driverContext = driverContext(); + ThreadPool threadPool = threadPool(); + try { + int positions = between(1000, 5000); + List inPages = randomList(1, 100, () -> { + var block = driverContext.blockFactory().newConstantIntBlockWith(randomInt(), positions); + return new Page(block); + }); + final var sourceOperator = new CannedSourceOperator(inPages.iterator()); + final int maxAllowedRows = between(1, 100); + final AtomicInteger processedRows = new AtomicInteger(0); + var sinkHandler = new ExchangeSinkHandler(driverContext.blockFactory(), positions, System::currentTimeMillis); + var sinkOperator = new ExchangeSinkOperator(sinkHandler.createExchangeSink(), Function.identity()); + final var delayOperator = new EvalOperator(driverContext.blockFactory(), new EvalOperator.ExpressionEvaluator() { + @Override + public Block eval(Page page) { + for (int i = 0; i < page.getPositionCount(); i++) { + driverContext.checkForEarlyTermination(); + if (processedRows.incrementAndGet() >= maxAllowedRows) { + sinkHandler.fetchPageAsync(true, ActionListener.noop()); + } + } + return driverContext.blockFactory().newConstantBooleanBlockWith(true, page.getPositionCount()); + } + + @Override + public void close() { + + } + }); + Driver driver = new Driver(driverContext, sourceOperator, List.of(delayOperator), sinkOperator, () -> {}); + ThreadContext threadContext = threadPool.getThreadContext(); + PlainActionFuture future = new PlainActionFuture<>(); + + 
Driver.start(threadContext, threadPool.executor("esql"), driver, between(1, 1000), future); + future.actionGet(30, TimeUnit.SECONDS); + assertThat(processedRows.get(), equalTo(maxAllowedRows)); + } finally { + terminate(threadPool); + } + } + + public void testResumeOnEarlyFinish() throws Exception { + DriverContext driverContext = driverContext(); + ThreadPool threadPool = threadPool(); + try { + PlainActionFuture sourceFuture = new PlainActionFuture<>(); + var sourceHandler = new ExchangeSourceHandler(between(1, 5), threadPool.executor("esql"), sourceFuture); + var sinkHandler = new ExchangeSinkHandler(driverContext.blockFactory(), between(1, 5), System::currentTimeMillis); + var sourceOperator = new ExchangeSourceOperator(sourceHandler.createExchangeSource()); + var sinkOperator = new ExchangeSinkOperator(sinkHandler.createExchangeSink(), Function.identity()); + Driver driver = new Driver(driverContext, sourceOperator, List.of(), sinkOperator, () -> {}); + PlainActionFuture future = new PlainActionFuture<>(); + Driver.start(threadPool.getThreadContext(), threadPool.executor("esql"), driver, between(1, 1000), future); + assertBusy( + () -> assertThat( + driver.status().status(), + either(equalTo(DriverStatus.Status.ASYNC)).or(equalTo(DriverStatus.Status.STARTING)) + ) + ); + sinkHandler.fetchPageAsync(true, ActionListener.noop()); + future.actionGet(5, TimeUnit.SECONDS); + assertThat(driver.status().status(), equalTo(DriverStatus.Status.DONE)); + sourceFuture.actionGet(5, TimeUnit.SECONDS); + } finally { + terminate(threadPool); + } + } + private static void assertRunningWithRegularUser(ThreadPool threadPool) { String user = threadPool.getThreadContext().getHeader("user"); assertThat(user, equalTo("user1")); @@ -291,7 +370,7 @@ private static Page randomPage() { return new Page(block.block()); } - static class SwitchContextOperator extends AsyncOperator { + static class SwitchContextOperator extends AsyncOperator { private final ThreadPool threadPool; SwitchContextOperator(DriverContext driverContext, ThreadPool threadPool) { @@ -314,6 +393,16 @@ protected void performAsync(Page page, ActionListener listener) { }), TimeValue.timeValueNanos(between(1, 1_000_000)), threadPool.executor("esql")); } + @Override + public Page getOutput() { + return fetchFromBuffer(); + } + + @Override + protected void releaseFetchedOnAnyThread(Page page) { + releasePageOnAnyThread(page); + } + @Override protected void doClose() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java index 245ae171c630b..93e1a9f8d221d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java @@ -16,7 +16,7 @@ public class HashAggregationOperatorStatusTests extends AbstractWireSerializingTestCase { public static HashAggregationOperator.Status simple() { - return new HashAggregationOperator.Status(500012, 200012, 123); + return new HashAggregationOperator.Status(500012, 200012, 123, 111, 222); } public static String simpleToJson() { @@ -26,7 +26,9 @@ public static String simpleToJson() { "hash_time" : "500micros", "aggregation_nanos" : 200012, "aggregation_time" : "200micros", - "pages_processed" : 123 + "pages_processed" : 123, + "rows_received" : 
111, + "rows_emitted" : 222 }"""; } @@ -41,7 +43,13 @@ protected Writeable.Reader instanceReader() { @Override public HashAggregationOperator.Status createTestInstance() { - return new HashAggregationOperator.Status(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeInt()); + return new HashAggregationOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override @@ -49,12 +57,16 @@ protected HashAggregationOperator.Status mutateInstance(HashAggregationOperator. long hashNanos = instance.hashNanos(); long aggregationNanos = instance.aggregationNanos(); int pagesProcessed = instance.pagesProcessed(); - switch (between(0, 2)) { + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 4)) { case 0 -> hashNanos = randomValueOtherThan(hashNanos, ESTestCase::randomNonNegativeLong); case 1 -> aggregationNanos = randomValueOtherThan(aggregationNanos, ESTestCase::randomNonNegativeLong); case 2 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + case 3 -> rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + case 4 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); default -> throw new UnsupportedOperationException(); } - return new HashAggregationOperator.Status(hashNanos, aggregationNanos, pagesProcessed); + return new HashAggregationOperator.Status(hashNanos, aggregationNanos, pagesProcessed, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitStatusTests.java index fd2b75f6bd819..016c8a85f94cd 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitStatusTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -17,8 +18,8 @@ public class LimitStatusTests extends AbstractWireSerializingTestCase { public void testToXContent() { - assertThat(Strings.toString(new LimitOperator.Status(10, 1, 1)), equalTo(""" - {"limit":10,"limit_remaining":1,"pages_processed":1}""")); + assertThat(Strings.toString(new LimitOperator.Status(10, 1, 1, 111, 222)), equalTo(""" + {"limit":10,"limit_remaining":1,"pages_processed":1,"rows_received":111,"rows_emitted":222}""")); } @Override @@ -28,7 +29,13 @@ protected Writeable.Reader instanceReader() { @Override protected LimitOperator.Status createTestInstance() { - return new LimitOperator.Status(between(0, Integer.MAX_VALUE), between(0, Integer.MAX_VALUE), between(0, Integer.MAX_VALUE)); + return new LimitOperator.Status( + randomNonNegativeInt(), + randomNonNegativeInt(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override @@ -36,19 +43,27 @@ protected LimitOperator.Status mutateInstance(LimitOperator.Status instance) thr int limit = instance.limit(); int limitRemaining = instance.limitRemaining(); int pagesProcessed = instance.pagesProcessed(); - switch (between(0, 2)) { + long rowsReceived = instance.rowsReceived(); + long 
rowsEmitted = instance.rowsEmitted(); + switch (between(0, 4)) { case 0: - limit = randomValueOtherThan(limit, () -> between(0, Integer.MAX_VALUE)); + limit = randomValueOtherThan(limit, ESTestCase::randomNonNegativeInt); break; case 1: - limitRemaining = randomValueOtherThan(limitRemaining, () -> between(0, Integer.MAX_VALUE)); + limitRemaining = randomValueOtherThan(limitRemaining, ESTestCase::randomNonNegativeInt); break; case 2: - pagesProcessed = randomValueOtherThan(pagesProcessed, () -> between(0, Integer.MAX_VALUE)); + pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + break; + case 3: + rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + break; + case 4: + rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); break; default: throw new IllegalArgumentException(); } - return new LimitOperator.Status(limit, limitRemaining, pagesProcessed); + return new LimitOperator.Status(limit, limitRemaining, pagesProcessed, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java index 9527388a0d3cf..a421ba360e4aa 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java @@ -16,12 +16,12 @@ public class MvExpandOperatorStatusTests extends AbstractWireSerializingTestCase { public static MvExpandOperator.Status simple() { - return new MvExpandOperator.Status(10, 15, 9); + return new MvExpandOperator.Status(10, 15, 9, 111, 222); } public static String simpleToJson() { return """ - {"pages_in":10,"pages_out":15,"noops":9}"""; + {"pages_received":10,"pages_emitted":15,"noops":9,"rows_received":111,"rows_emitted":222}"""; } public void testToXContent() { @@ -35,32 +35,30 @@ protected Writeable.Reader instanceReader() { @Override public MvExpandOperator.Status createTestInstance() { - return new MvExpandOperator.Status(randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeInt()); + return new MvExpandOperator.Status( + randomNonNegativeInt(), + randomNonNegativeInt(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override protected MvExpandOperator.Status mutateInstance(MvExpandOperator.Status instance) { - switch (between(0, 2)) { - case 0: - return new MvExpandOperator.Status( - randomValueOtherThan(instance.pagesIn(), ESTestCase::randomNonNegativeInt), - instance.pagesOut(), - instance.noops() - ); - case 1: - return new MvExpandOperator.Status( - instance.pagesIn(), - randomValueOtherThan(instance.pagesOut(), ESTestCase::randomNonNegativeInt), - instance.noops() - ); - case 2: - return new MvExpandOperator.Status( - instance.pagesIn(), - instance.pagesOut(), - randomValueOtherThan(instance.noops(), ESTestCase::randomNonNegativeInt) - ); - default: - throw new UnsupportedOperationException(); + int pagesReceived = instance.pagesReceived(); + int pagesEmitted = instance.pagesEmitted(); + int noops = instance.noops(); + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 4)) { + case 0 -> pagesReceived = randomValueOtherThan(instance.pagesReceived(), ESTestCase::randomNonNegativeInt); + case 1 -> pagesEmitted 
= randomValueOtherThan(instance.pagesEmitted(), ESTestCase::randomNonNegativeInt); + case 2 -> noops = randomValueOtherThan(instance.noops(), ESTestCase::randomNonNegativeInt); + case 3 -> rowsReceived = randomValueOtherThan(instance.rowsReceived(), ESTestCase::randomNonNegativeLong); + case 4 -> rowsEmitted = randomValueOtherThan(instance.rowsEmitted(), ESTestCase::randomNonNegativeLong); + default -> throw new UnsupportedOperationException(); } + return new MvExpandOperator.Status(pagesReceived, pagesEmitted, noops, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java index 9442fb05761de..b07ff8b0da571 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java @@ -210,8 +210,8 @@ public void testNoopStatus() { assertThat(result, hasSize(1)); assertThat(valuesAtPositions(result.get(0).getBlock(0), 0, 2), equalTo(List.of(List.of(1), List.of(2)))); MvExpandOperator.Status status = op.status(); - assertThat(status.pagesIn(), equalTo(1)); - assertThat(status.pagesOut(), equalTo(1)); + assertThat(status.pagesReceived(), equalTo(1)); + assertThat(status.pagesEmitted(), equalTo(1)); assertThat(status.noops(), equalTo(1)); } @@ -223,8 +223,8 @@ public void testExpandStatus() { assertThat(result, hasSize(1)); assertThat(valuesAtPositions(result.get(0).getBlock(0), 0, 2), equalTo(List.of(List.of(1), List.of(2)))); MvExpandOperator.Status status = op.status(); - assertThat(status.pagesIn(), equalTo(1)); - assertThat(status.pagesOut(), equalTo(1)); + assertThat(status.pagesReceived(), equalTo(1)); + assertThat(status.pagesEmitted(), equalTo(1)); assertThat(status.noops(), equalTo(0)); result.forEach(Page::releaseBlocks); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index 54db0453530bc..28b7095aa1bde 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -212,7 +212,8 @@ protected final void assertSimple(DriverContext context, int size) { // Clone the input so that the operator can close it, then, later, we can read it again to build the assertion. 
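+ // The deep copy goes through the non-breaking test block factory, so it is not counted by the breaker assertion below.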
List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); - List results = drive(simple().get(context), input.iterator(), context); + var operator = simple().get(context); + List results = drive(operator, input.iterator(), context); assertSimpleOutput(origInput, results); assertThat(context.breaker().getUsed(), equalTo(0L)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java index 369913c7d152c..aa2ca2faebbd5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -21,13 +22,14 @@ public void testToXContent() { } public static ExchangeSinkOperator.Status simple() { - return new ExchangeSinkOperator.Status(10); + return new ExchangeSinkOperator.Status(10, 111); } public static String simpleToJson() { return """ { - "pages_accepted" : 10 + "pages_received" : 10, + "rows_received" : 111 }"""; } @@ -38,11 +40,18 @@ protected Writeable.Reader instanceReader() { @Override public ExchangeSinkOperator.Status createTestInstance() { - return new ExchangeSinkOperator.Status(between(0, Integer.MAX_VALUE)); + return new ExchangeSinkOperator.Status(randomNonNegativeInt(), randomNonNegativeLong()); } @Override protected ExchangeSinkOperator.Status mutateInstance(ExchangeSinkOperator.Status instance) throws IOException { - return new ExchangeSinkOperator.Status(randomValueOtherThan(instance.pagesAccepted(), () -> between(0, Integer.MAX_VALUE))); + int pagesReceived = instance.pagesReceived(); + long rowsReceived = instance.rowsReceived(); + switch (between(0, 1)) { + case 0 -> pagesReceived = randomValueOtherThan(pagesReceived, ESTestCase::randomNonNegativeInt); + case 1 -> rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + default -> throw new UnsupportedOperationException(); + } + return new ExchangeSinkOperator.Status(pagesReceived, rowsReceived); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperatorStatusTests.java index 2c5f7eebbaf3d..e99ea69af54d9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperatorStatusTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -17,8 +18,8 @@ public class ExchangeSourceOperatorStatusTests extends AbstractWireSerializingTestCase { public void testToXContent() { - assertThat(Strings.toString(new ExchangeSourceOperator.Status(0, 10)), equalTo(""" - 
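The rewritten mutators all lean on ESTestCase.randomValueOtherThan with method references. Its contract, roughly (a simplified sketch of the real ESTestCase helper, which retries via a supplier in the same way):

    static <T> T randomValueOtherThan(T input, Supplier<T> randomSupplier) {
        T value;
        do {
            value = randomSupplier.get();
        } while (value.equals(input)); // re-sample until the value actually differs
        return value;
    }

That retry loop is why the supplier matters: ESTestCase::randomNonNegativeInt covers the same [0, Integer.MAX_VALUE] range as the old () -> between(0, Integer.MAX_VALUE) lambdas, so the swap changes readability, not distribution.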
{"pages_waiting":0,"pages_emitted":10}""")); + assertThat(Strings.toString(new ExchangeSourceOperator.Status(0, 10, 111)), equalTo(""" + {"pages_waiting":0,"pages_emitted":10,"rows_emitted":111}""")); } @Override @@ -28,24 +29,20 @@ protected Writeable.Reader instanceReader() { @Override protected ExchangeSourceOperator.Status createTestInstance() { - return new ExchangeSourceOperator.Status(between(0, Integer.MAX_VALUE), between(0, Integer.MAX_VALUE)); + return new ExchangeSourceOperator.Status(randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeLong()); } @Override protected ExchangeSourceOperator.Status mutateInstance(ExchangeSourceOperator.Status instance) throws IOException { - switch (between(0, 1)) { - case 0: - return new ExchangeSourceOperator.Status( - randomValueOtherThan(instance.pagesWaiting(), () -> between(0, Integer.MAX_VALUE)), - instance.pagesEmitted() - ); - case 1: - return new ExchangeSourceOperator.Status( - instance.pagesWaiting(), - randomValueOtherThan(instance.pagesEmitted(), () -> between(0, Integer.MAX_VALUE)) - ); - default: - throw new UnsupportedOperationException(); + int pagesWaiting = instance.pagesWaiting(); + int pagesEmitted = instance.pagesEmitted(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 2)) { + case 0 -> pagesWaiting = randomValueOtherThan(pagesWaiting, ESTestCase::randomNonNegativeInt); + case 1 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); + case 2 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); + default -> throw new UnsupportedOperationException(); } + return new ExchangeSourceOperator.Status(pagesWaiting, pagesEmitted, rowsEmitted); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatusTests.java index f52274b68bdf6..5faf5159a5465 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatusTests.java @@ -10,13 +10,30 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; public class TopNOperatorStatusTests extends AbstractWireSerializingTestCase { + public static TopNOperatorStatus simple() { + return new TopNOperatorStatus(10, 2000, 123, 123, 111, 222); + } + + public static String simpleToJson() { + return """ + { + "occupied_rows" : 10, + "ram_bytes_used" : 2000, + "ram_used" : "1.9kb", + "pages_received" : 123, + "pages_emitted" : 123, + "rows_received" : 111, + "rows_emitted" : 222 + }"""; + } + public void testToXContent() { - assertThat(Strings.toString(new TopNOperatorStatus(10, 2000)), equalTo(""" - {"occupied_rows":10,"ram_bytes_used":2000,"ram_used":"1.9kb"}""")); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } @Override @@ -26,23 +43,46 @@ protected Writeable.Reader instanceReader() { @Override protected TopNOperatorStatus createTestInstance() { - return new TopNOperatorStatus(randomNonNegativeInt(), randomNonNegativeLong()); + return new TopNOperatorStatus( + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeInt(), + 
randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); } @Override protected TopNOperatorStatus mutateInstance(TopNOperatorStatus instance) { int occupiedRows = instance.occupiedRows(); long ramBytesUsed = instance.ramBytesUsed(); - switch (between(0, 1)) { + int pagesReceived = instance.pagesReceived(); + int pagesEmitted = instance.pagesEmitted(); + long rowsReceived = instance.rowsReceived(); + long rowsEmitted = instance.rowsEmitted(); + switch (between(0, 5)) { case 0: - occupiedRows = randomValueOtherThan(occupiedRows, () -> randomNonNegativeInt()); + occupiedRows = randomValueOtherThan(occupiedRows, ESTestCase::randomNonNegativeInt); break; case 1: - ramBytesUsed = randomValueOtherThan(ramBytesUsed, () -> randomNonNegativeLong()); + ramBytesUsed = randomValueOtherThan(ramBytesUsed, ESTestCase::randomNonNegativeLong); + break; + case 2: + pagesReceived = randomValueOtherThan(pagesReceived, ESTestCase::randomNonNegativeInt); + break; + case 3: + pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); + break; + case 4: + rowsReceived = randomValueOtherThan(rowsReceived, ESTestCase::randomNonNegativeLong); + break; + case 5: + rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); break; default: throw new IllegalArgumentException(); } - return new TopNOperatorStatus(occupiedRows, ramBytesUsed); + return new TopNOperatorStatus(occupiedRows, ramBytesUsed, pagesReceived, pagesEmitted, rowsReceived, rowsEmitted); } } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index ce4aa8582929b..98e5799c8d3f2 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -548,7 +548,7 @@ record Listen(long timestamp, String songId, double duration) { public void testLookupJoinIndexAllowed() throws Exception { assumeTrue( "Requires LOOKUP JOIN capability", - EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V10.capabilityName())) + EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V11.capabilityName())) ); Response resp = runESQLCommand( @@ -587,7 +587,7 @@ public void testLookupJoinIndexAllowed() throws Exception { public void testLookupJoinIndexForbidden() throws Exception { assumeTrue( "Requires LOOKUP JOIN capability", - EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V10.capabilityName())) + EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V11.capabilityName())) ); var resp = expectThrows( diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index b22925b44ebab..3b5377c2768fb 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -8,7 +8,6 @@ package 
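The V10 to V11 bump in these QA suites does real work: capability names are advertised per node, so renaming the capability automatically re-gates every test against nodes that only know join_lookup_v10. The guard pattern, excerpted from the hunks above (assume-style, so missing support skips the test rather than failing it):

    assumeTrue(
        "Requires LOOKUP JOIN capability",
        EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V11.capabilityName()))
    );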
org.elasticsearch.xpack.esql.qa.mixed; import org.elasticsearch.Version; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.xpack.esql.CsvSpecReader.CsvTestCase; @@ -21,8 +20,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V10; -import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V11; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @ClassRule @@ -49,10 +47,6 @@ protected static boolean oldClusterHasFeature(String featureId) { return oldClusterTestFeatureService.clusterHasFeature(featureId); } - protected static boolean oldClusterHasFeature(NodeFeature feature) { - return oldClusterHasFeature(feature.id()); - } - @AfterClass public static void cleanUp() { oldClusterTestFeatureService = null; @@ -74,14 +68,6 @@ public MixedClusterEsqlSpecIT( protected void shouldSkipTest(String testName) throws IOException { super.shouldSkipTest(testName); assumeTrue("Test " + testName + " is skipped on " + bwcVersion, isEnabled(testName, instructions, bwcVersion)); - if (mode == ASYNC) { - assumeTrue("Async is not supported on " + bwcVersion, supportsAsync()); - } - } - - @Override - protected boolean supportsAsync() { - return oldClusterHasFeature(ASYNC_QUERY_FEATURE_ID); } @Override @@ -96,7 +82,7 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V10.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V11.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 987a5334f903c..f8b921f239923 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V10; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V11; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", 
testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V10.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V11.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index 050259bbb5b5c..cae9e1ba8eb66 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -312,22 +312,23 @@ public void testProfile() throws IOException { } signatures.add(sig); } + var readProfile = matchesList().item("LuceneSourceOperator") + .item("ValuesSourceReaderOperator") + .item("AggregationOperator") + .item("ExchangeSinkOperator"); + var mergeProfile = matchesList().item("ExchangeSourceOperator") + .item("AggregationOperator") + .item("ProjectOperator") + .item("LimitOperator") + .item("EvalOperator") + .item("ProjectOperator") + .item("OutputOperator"); + var emptyReduction = matchesList().item("ExchangeSourceOperator").item("ExchangeSinkOperator"); + var reduction = matchesList().item("ExchangeSourceOperator").item("AggregationOperator").item("ExchangeSinkOperator"); assertThat( signatures, - containsInAnyOrder( - matchesList().item("LuceneSourceOperator") - .item("ValuesSourceReaderOperator") - .item("AggregationOperator") - .item("ExchangeSinkOperator"), - matchesList().item("ExchangeSourceOperator").item("ExchangeSinkOperator"), - matchesList().item("ExchangeSourceOperator") - .item("AggregationOperator") - .item("ProjectOperator") - .item("LimitOperator") - .item("EvalOperator") - .item("ProjectOperator") - .item("OutputOperator") - ) + Matchers.either(containsInAnyOrder(readProfile, reduction, mergeProfile)) + .or(containsInAnyOrder(readProfile, emptyReduction, mergeProfile)) ); } @@ -574,23 +575,35 @@ private String checkOperatorProfile(Map o) { .entry("slice_min", 0) .entry("current", DocIdSetIterator.NO_MORE_DOCS) .entry("pages_emitted", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)) .entry("processing_nanos", greaterThan(0)) .entry("processed_queries", List.of("*:*")); case "ValuesSourceReaderOperator" -> basicProfile().entry("readers_built", matchesMap().extraOk()); case "AggregationOperator" -> matchesMap().entry("pages_processed", greaterThan(0)) + .entry("rows_received", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)) .entry("aggregation_nanos", greaterThan(0)) .entry("aggregation_finish_nanos", greaterThan(0)); - case "ExchangeSinkOperator" -> matchesMap().entry("pages_accepted", greaterThan(0)); - case "ExchangeSourceOperator" -> matchesMap().entry("pages_emitted", greaterThan(0)).entry("pages_waiting", 0); + case "ExchangeSinkOperator" -> matchesMap().entry("pages_received", greaterThan(0)).entry("rows_received", greaterThan(0)); + case "ExchangeSourceOperator" -> matchesMap().entry("pages_waiting", 0) + .entry("pages_emitted", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)); case "ProjectOperator", "EvalOperator" -> basicProfile(); case "LimitOperator" -> 
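The profile assertion above had to grow an alternative because the node-level reduction driver is not guaranteed to plan an AggregationOperator; when it does not, the middle driver degenerates to a pass-through exchange. Hamcrest's either/or composes the two legal outcomes (this is the composition from the hunk, with the rationale as comments):

    assertThat(
        signatures,
        // reduction ran on the data node:
        Matchers.either(containsInAnyOrder(readProfile, reduction, mergeProfile))
            // or the reduction driver was a plain exchange pass-through:
            .or(containsInAnyOrder(readProfile, emptyReduction, mergeProfile))
    );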
matchesMap().entry("pages_processed", greaterThan(0)) .entry("limit", 1000) - .entry("limit_remaining", 999); + .entry("limit_remaining", 999) + .entry("rows_received", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)); case "OutputOperator" -> null; case "TopNOperator" -> matchesMap().entry("occupied_rows", 0) + .entry("pages_received", greaterThan(0)) + .entry("pages_emitted", greaterThan(0)) + .entry("rows_received", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)) .entry("ram_used", instanceOf(String.class)) .entry("ram_bytes_used", greaterThan(0)); case "LuceneTopNSourceOperator" -> matchesMap().entry("pages_emitted", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)) .entry("current", greaterThan(0)) .entry("processed_slices", greaterThan(0)) .entry("processed_shards", List.of("rest-esql-test:0")) @@ -611,7 +624,10 @@ private String checkOperatorProfile(Map o) { } private MapMatcher basicProfile() { - return matchesMap().entry("pages_processed", greaterThan(0)).entry("process_nanos", greaterThan(0)); + return matchesMap().entry("pages_processed", greaterThan(0)) + .entry("process_nanos", greaterThan(0)) + .entry("rows_received", greaterThan(0)) + .entry("rows_emitted", greaterThan(0)); } private void assertException(String query, String... errorMessages) throws IOException { diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index 49f5b01f247cf..18bfb6b8676ce 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -75,9 +75,6 @@ @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) public abstract class EsqlSpecTestCase extends ESRestTestCase { - // To avoid referencing the main module, we replicate EsqlFeatures.ASYNC_QUERY.id() here - protected static final String ASYNC_QUERY_FEATURE_ID = "esql.async_query"; - private static final Logger LOGGER = LogManager.getLogger(EsqlSpecTestCase.class); private final String fileName; private final String groupName; @@ -140,10 +137,6 @@ public void setup() throws IOException { } } - protected boolean supportsAsync() { - return clusterHasFeature(ASYNC_QUERY_FEATURE_ID); // the Async API was introduced in 8.13.0 - } - @AfterClass public static void wipeTestData() throws IOException { try { @@ -281,7 +274,6 @@ protected boolean deduplicateExactWarnings() { private Map runEsql(RequestObjectBuilder requestObject, AssertWarnings assertWarnings) throws IOException { if (mode == Mode.ASYNC) { - assert supportsAsync(); return RestEsqlTestCase.runEsqlAsync(requestObject, assertWarnings); } else { return RestEsqlTestCase.runEsqlSync(requestObject, assertWarnings); diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java index ac5a3d4be27f3..5e0aeb5b3535d 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java @@ -227,7 +227,7 @@ public void testIndicesDontExist() throws IOException { assertThat(e.getMessage(), 
containsString("index_not_found_exception")); assertThat(e.getMessage(), anyOf(containsString("no such index [foo]"), containsString("no such index [remote_cluster:foo]"))); - if (EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()) { + if (EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()) { e = expectThrows( ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query(from("test1") + " | LOOKUP JOIN foo ON id1")) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index 71a65f059528a..f44653171a4f5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -1226,3 +1226,25 @@ FROM date_nanos millis:date | nanos:date_nanos | num:long 2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z | 1698064048948000000 ; + +Date Nanos Format +required_capability: date_nanos_date_format + +FROM date_nanos +| EVAL sv_nanos = MV_MAX(nanos) +| EVAL a = DATE_FORMAT(sv_nanos), b = DATE_FORMAT("yyyy-MM-dd", sv_nanos), c = DATE_FORMAT("strict_date_optional_time_nanos", sv_nanos) +| KEEP sv_nanos, a, b, c; +ignoreOrder:true + +sv_nanos:date_nanos | a:keyword | b:keyword | c:keyword +2023-10-23T13:55:01.543123456Z | 2023-10-23T13:55:01.543Z | 2023-10-23 | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832987654Z | 2023-10-23T13:53:55.832Z | 2023-10-23 | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015787878Z | 2023-10-23T13:52:55.015Z | 2023-10-23 | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732102837Z | 2023-10-23T13:51:54.732Z | 2023-10-23 | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937193000Z | 2023-10-23T13:33:34.937Z | 2023-10-23 | 2023-10-23T13:33:34.937193Z +2023-10-23T12:27:28.948000000Z | 2023-10-23T12:27:28.948Z | 2023-10-23 | 2023-10-23T12:27:28.948Z +2023-10-23T12:15:03.360103847Z | 2023-10-23T12:15:03.360Z | 2023-10-23 | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z | 2023-10-23T12:15:03.360Z | 2023-10-23 | 2023-10-23T12:15:03.360103847Z +2023-03-23T12:15:03.360103847Z | 2023-03-23T12:15:03.360Z | 2023-03-23 | 2023-03-23T12:15:03.360103847Z +2023-03-23T12:15:03.360103847Z | 2023-03-23T12:15:03.360Z | 2023-03-23 | 2023-03-23T12:15:03.360103847Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 95119cae95590..8d24ddb45602b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -8,7 +8,7 @@ ############################################### basicOnTheDataNode -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | EVAL language_code = languages @@ -25,7 +25,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code @@ -36,7 +36,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -53,7 +53,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v10 +required_capability: 
join_lookup_v11 FROM employees | EVAL language_code = languages @@ -71,7 +71,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -89,7 +89,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; sortEvalBeforeLookup -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -106,7 +106,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueLeftKeyOnTheDataNode -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | WHERE emp_no <= 10030 @@ -130,60 +130,69 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueRightKeyOnTheDataNode -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | EVAL language_code = emp_no % 10 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code | WHERE emp_no > 10090 AND emp_no < 10096 -| SORT emp_no -| EVAL country = MV_SORT(country) +| SORT emp_no, country | KEEP emp_no, language_code, language_name, country ; -emp_no:integer | language_code:integer | language_name:keyword | country:keyword -10091 | 1 | [English, English, English] | [Canada, United Kingdom, United States of America] -10092 | 2 | [German, German, German] | [Austria, Germany, Switzerland] -10093 | 3 | null | null -10094 | 4 | Quenya | null -10095 | 5 | null | Atlantis +emp_no:integer | language_code:integer | language_name:keyword | country:text + 10091 | 1 | English | Canada + 10091 | 1 | null | United Kingdom + 10091 | 1 | English | United States of America + 10091 | 1 | English | null + 10092 | 2 | German | [Germany, Austria] + 10092 | 2 | German | Switzerland + 10092 | 2 | German | null + 10093 | 3 | null | null + 10094 | 4 | Quenya | null + 10095 | 5 | null | Atlantis ; nonUniqueRightKeyOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no | LIMIT 5 | EVAL language_code = emp_no % 10 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code -| EVAL country = MV_SORT(country) | KEEP emp_no, language_code, language_name, country ; -emp_no:integer | language_code:integer | language_name:keyword | country:keyword -10001 | 1 | [English, English, English] | [Canada, United Kingdom, United States of America] -10002 | 2 | [German, German, German] | [Austria, Germany, Switzerland] -10003 | 3 | null | null -10004 | 4 | Quenya | null -10005 | 5 | null | Atlantis +emp_no:integer | language_code:integer | language_name:keyword | country:text +10001 | 1 | English | Canada +10001 | 1 | English | null +10001 | 1 | null | United Kingdom +10001 | 1 | English | United States of America +10002 | 2 | German | [Germany, Austria] +10002 | 2 | German | Switzerland +10002 | 2 | German | null +10003 | 3 | null | null +10004 | 4 | Quenya | null +10005 | 5 | null | Atlantis ; nonUniqueRightKeyFromRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW language_code = 2 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code | DROP country.keyword -| EVAL country = MV_SORT(country) ; -language_code:integer | language_name:keyword | country:keyword -2 | [German, German, German] | [Austria, Germany, Switzerland] +language_code:integer | country:text | language_name:keyword +2 | [Germany, Austria] | German +2 | Switzerland | German +2 | 
null | German ; repeatedIndexOnFrom -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM languages_lookup | LOOKUP JOIN languages_lookup ON language_code @@ -201,7 +210,7 @@ dropAllLookedUpFieldsOnTheDataNode-Ignore // Depends on // https://github.com/elastic/elasticsearch/issues/118778 // https://github.com/elastic/elasticsearch/issues/118781 -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | EVAL language_code = emp_no % 10 @@ -222,7 +231,7 @@ dropAllLookedUpFieldsOnTheCoordinator-Ignore // Depends on // https://github.com/elastic/elasticsearch/issues/118778 // https://github.com/elastic/elasticsearch/issues/118781 -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -247,7 +256,7 @@ emp_no:integer ############################################### filterOnLeftSide -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | EVAL language_code = languages @@ -264,7 +273,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnRightSide -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -280,7 +289,7 @@ FROM sample_data ; filterOnRightSideAfterStats -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -293,7 +302,7 @@ count:long | type:keyword ; filterOnJoinKey -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | EVAL language_code = languages @@ -308,7 +317,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyAndRightSide -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | WHERE emp_no < 10006 @@ -325,7 +334,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnRightSideOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -341,7 +350,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -357,7 +366,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyAndRightSideOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | SORT emp_no @@ -374,7 +383,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnTheDataNodeThenFilterOnTheCoordinator -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | EVAL language_code = languages @@ -395,31 +404,35 @@ emp_no:integer | language_code:integer | language_name:keyword ########################################################################### nullJoinKeyOnTheDataNode -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | WHERE emp_no < 10004 | EVAL language_code = emp_no % 10, language_code = CASE(language_code == 3, null, language_code) | LOOKUP JOIN languages_lookup_non_unique_key ON language_code -| SORT emp_no +| SORT emp_no, language_code, language_name | KEEP emp_no, language_code, language_name ; emp_no:integer | language_code:integer | language_name:keyword -10001 | 1 | [English, English, English] -10002 | 2 | [German, German, 
German] +10001 | 1 | English +10001 | 1 | English +10001 | 1 | English +10001 | 1 | null +10002 | 2 | German +10002 | 2 | German +10002 | 2 | German 10003 | null | null ; mvJoinKeyOnTheDataNode -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | WHERE 10003 < emp_no AND emp_no < 10008 | EVAL language_code = emp_no % 10 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code -| SORT emp_no -| EVAL language_name = MV_SORT(language_name) +| SORT emp_no, language_name | KEEP emp_no, language_code, language_name ; @@ -427,38 +440,43 @@ emp_no:integer | language_code:integer | language_name:keyword 10004 | 4 | Quenya 10005 | 5 | null 10006 | 6 | Mv-Lang -10007 | 7 | [Mv-Lang, Mv-Lang2] +10007 | 7 | Mv-Lang +10007 | 7 | Mv-Lang2 ; mvJoinKeyFromRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW language_code = [4, 5, 6, 7] | LOOKUP JOIN languages_lookup_non_unique_key ON language_code -| EVAL language_name = MV_SORT(language_name), country = MV_SORT(country) | KEEP language_code, language_name, country +| SORT language_code, language_name, country ; -language_code:integer | language_name:keyword | country:keyword -[4, 5, 6, 7] | [Mv-Lang, Mv-Lang2, Quenya] | [Atlantis, Mv-Land, Mv-Land2] +language_code:integer | language_name:keyword | country:text +[4, 5, 6, 7] | Mv-Lang | Mv-Land +[4, 5, 6, 7] | Mv-Lang2 | Mv-Land2 +[4, 5, 6, 7] | Quenya | null +[4, 5, 6, 7] | null | Atlantis ; mvJoinKeyFromRowExpanded -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW language_code = [4, 5, 6, 7, 8] | MV_EXPAND language_code | LOOKUP JOIN languages_lookup_non_unique_key ON language_code -| EVAL language_name = MV_SORT(language_name), country = MV_SORT(country) | KEEP language_code, language_name, country +| SORT language_code, language_name, country ; -language_code:integer | language_name:keyword | country:keyword -4 | Quenya | null -5 | null | Atlantis -6 | Mv-Lang | Mv-Land -7 | [Mv-Lang, Mv-Lang2] | [Mv-Land, Mv-Land2] -8 | Mv-Lang2 | Mv-Land2 +language_code:integer | language_name:keyword | country:text +4 | Quenya | null +5 | null | Atlantis +6 | Mv-Lang | Mv-Land +7 | Mv-Lang | Mv-Land +7 | Mv-Lang2 | Mv-Land2 +8 | Mv-Lang2 | Mv-Land2 ; ########################################################################### @@ -466,7 +484,7 @@ language_code:integer | language_name:keyword | country:keyword ########################################################################### joinOnNestedField -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM employees | WHERE 10000 < emp_no AND emp_no < 10006 @@ -486,7 +504,7 @@ emp_no:integer | language.id:integer | language.name:text joinOnNestedFieldRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW language.code = "EN" | LOOKUP JOIN languages_nested_fields ON language.code @@ -499,7 +517,7 @@ language.id:integer | language.code:keyword | language.name.keyword:keyword joinOnNestedNestedFieldRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW language.name.keyword = "English" | LOOKUP JOIN languages_nested_fields ON language.name.keyword @@ -515,7 +533,7 @@ language.id:integer | language.name:text | language.name.keyword:keyword ############################################### lookupIPFromRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ 
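Most of the expectation churn in this spec file is one semantic change: with join_lookup_v11, LOOKUP JOIN emits one output row per matching lookup row, SQL LEFT JOIN style, instead of collapsing all matches into multi-valued fields (hence the deleted MV_SORT calls). Illustrative semantics only, with hypothetical Row and lookupIndex helpers that are not ES code:

    for (Row left : input) {
        List<Row> matches = lookupIndex.getOrDefault(left.joinKey(), List.of());
        if (matches.isEmpty()) {
            emit(left.withNullLookupFields()); // unmatched rows survive, null-filled
        } else {
            for (Row right : matches) {
                emit(left.joinedWith(right));  // one output row per matching row
            }
        }
    }

That also explains the extra SORT keys added to the expectations: the join multiplies rows, so ordering by emp_no alone is no longer deterministic.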
-526,7 +544,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromKeepRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", right = "right" | KEEP left, client_ip, right @@ -538,7 +556,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -549,7 +567,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -562,7 +580,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -575,7 +593,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -594,7 +612,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -614,7 +632,7 @@ ignoreOrder:true ; lookupIPFromIndexKeepKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -636,7 +654,7 @@ timestamp:date | client_ip:keyword | event_duration:long | msg:keyword ; lookupIPFromIndexStats -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -652,7 +670,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -669,7 +687,7 @@ count:long | env:keyword ; statsAndLookupIPFromIndex -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -690,7 +708,7 @@ count:long | client_ip:keyword | env:keyword ############################################### lookupMessageFromRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -701,7 +719,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromKeepRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | KEEP left, message, right @@ -713,7 +731,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -724,7 +742,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | 
LOOKUP JOIN message_types_lookup ON message @@ -736,7 +754,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -754,7 +772,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -773,7 +791,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -793,7 +811,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -812,7 +830,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -827,7 +845,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -843,7 +861,7 @@ count:long | type:keyword ; statsAndLookupMessageFromIndex -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | STATS count = count(message) BY message @@ -861,7 +879,7 @@ count:long | type:keyword | message:keyword ; lookupMessageFromIndexTwice -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -883,7 +901,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -906,7 +924,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceFullyShadowing -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -930,7 +948,7 @@ ignoreOrder:true ############################################### lookupIPAndMessageFromRow -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -942,7 +960,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBefore -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | KEEP left, client_ip, message, right @@ -955,7 +973,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBetween -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -968,7 +986,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepAfter -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN 
clientips_lookup ON client_ip @@ -981,7 +999,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowing -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", type = "type", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -993,7 +1011,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1007,7 +1025,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1022,7 +1040,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeepKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1038,7 +1056,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepReordered -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1052,7 +1070,7 @@ right | Development | Success | 172.21.0.5 ; lookupIPAndMessageFromIndex -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1072,7 +1090,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1093,7 +1111,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexStats -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1111,7 +1129,7 @@ count:long | env:keyword | type:keyword ; lookupIPAndMessageFromIndexStatsKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1130,7 +1148,7 @@ count:long | env:keyword | type:keyword ; statsAndLookupIPAndMessageFromIndex -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1149,7 +1167,7 @@ count:long | client_ip:keyword | message:keyword | env:keyword | type:keyw ; lookupIPAndMessageFromIndexChainedEvalKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1171,7 +1189,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexChainedRenameKeep -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1193,7 +1211,7 @@ ignoreOrder:true ; lookupIndexInFromRepeatedRowBug -required_capability: join_lookup_v10 +required_capability: join_lookup_v11 FROM 
languages_lookup_non_unique_key | WHERE language_code == 1 | LOOKUP JOIN languages_lookup ON language_code diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 34991c3958f2a..8e27cfceb28e6 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -160,7 +160,7 @@ public void testTaskContents() throws Exception { } if (o.operator().equals("ExchangeSinkOperator")) { ExchangeSinkOperator.Status oStatus = (ExchangeSinkOperator.Status) o.status(); - assertThat(oStatus.pagesAccepted(), greaterThanOrEqualTo(0)); + assertThat(oStatus.pagesReceived(), greaterThanOrEqualTo(0)); exchangeSinks++; } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java index f31eabea9d616..060a50684b39f 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java @@ -60,6 +60,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; @@ -80,9 +81,8 @@ public void testLookupIndex() throws IOException { /** * Tests when multiple results match. 
*/
-    @AwaitsFix(bugUrl = "fixing real soon now")
     public void testLookupIndexMultiResults() throws IOException {
-        runLookup(new UsingSingleLookupTable(new Object[] { "aa", new String[] { "bb", "ff" }, "cc", "dd" }));
+        runLookup(new UsingSingleLookupTable(new String[] { "aa", "bb", "bb", "dd" }));
     }
 
     interface PopulateIndices {
@@ -90,24 +90,24 @@ interface PopulateIndices {
     }
 
     class UsingSingleLookupTable implements PopulateIndices {
-        private final Object[] lookupData;
+        private final Map<String, List<Integer>> matches = new HashMap<>();
+        private final String[] lookupData;
 
-        UsingSingleLookupTable(Object[] lookupData) {
+        UsingSingleLookupTable(String[] lookupData) {
             this.lookupData = lookupData;
+            for (int i = 0; i < lookupData.length; i++) {
+                matches.computeIfAbsent(lookupData[i], k -> new ArrayList<>()).add(i);
+            }
         }
 
         @Override
-        public void populate(int docCount, List<String> expected) throws IOException {
+        public void populate(int docCount, List<String> expected) {
             List<IndexRequestBuilder> docs = new ArrayList<>();
             for (int i = 0; i < docCount; i++) {
-                docs.add(client().prepareIndex("source").setSource(Map.of("data", lookupData[i % lookupData.length])));
-                Object d = lookupData[i % lookupData.length];
-                if (d instanceof String s) {
-                    expected.add(s + ":" + (i % lookupData.length));
-                } else if (d instanceof String[] ss) {
-                    for (String s : ss) {
-                        expected.add(s + ":" + (i % lookupData.length));
-                    }
+                String data = lookupData[i % lookupData.length];
+                docs.add(client().prepareIndex("source").setSource(Map.of("data", data)));
+                for (Integer match : matches.get(data)) {
+                    expected.add(data + ":" + match);
                 }
             }
             for (int i = 0; i < lookupData.length; i++) {
diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatMillisConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatMillisConstantEvaluator.java
new file mode 100644
index 0000000000000..2f41a7440bb06
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatMillisConstantEvaluator.java
@@ -0,0 +1,132 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.xpack.esql.expression.function.scalar.date;
+
+import java.lang.IllegalArgumentException;
+import java.lang.Override;
+import java.lang.String;
+import org.elasticsearch.common.time.DateFormatter;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BytesRefBlock;
+import org.elasticsearch.compute.data.BytesRefVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.data.LongVector;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.compute.operator.EvalOperator;
+import org.elasticsearch.compute.operator.Warnings;
+import org.elasticsearch.core.Releasables;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+/**
+ * {@link EvalOperator.ExpressionEvaluator} implementation for {@link DateFormat}.
+ * This class is generated. Do not edit it.
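The rewritten fixture above precomputes which lookup rows each value matches, which is what lets testLookupIndexMultiResults drop its @AwaitsFix. The index-building idiom, lifted straight from the constructor:

    Map<String, List<Integer>> matches = new HashMap<>();
    String[] lookupData = { "aa", "bb", "bb", "dd" };
    for (int i = 0; i < lookupData.length; i++) {
        matches.computeIfAbsent(lookupData[i], k -> new ArrayList<>()).add(i);
    }
    // matches is now {aa=[0], bb=[1, 2], dd=[3]}: "bb" legitimately matches two
    // rows, so the expected output contains one entry per match.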
+ */ +public final class DateFormatMillisConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final EvalOperator.ExpressionEvaluator val; + + private final DateFormatter formatter; + + private final DriverContext driverContext; + + private Warnings warnings; + + public DateFormatMillisConstantEvaluator(Source source, EvalOperator.ExpressionEvaluator val, + DateFormatter formatter, DriverContext driverContext) { + this.source = source; + this.val = val; + this.formatter = formatter; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock valBlock = (LongBlock) val.eval(page)) { + LongVector valVector = valBlock.asVector(); + if (valVector == null) { + return eval(page.getPositionCount(), valBlock); + } + return eval(page.getPositionCount(), valVector).asBlock(); + } + } + + public BytesRefBlock eval(int positionCount, LongBlock valBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + if (valBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (valBlock.getValueCount(p) != 1) { + if (valBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBytesRef(DateFormat.processMillis(valBlock.getLong(valBlock.getFirstValueIndex(p)), this.formatter)); + } + return result.build(); + } + } + + public BytesRefVector eval(int positionCount, LongVector valVector) { + try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(DateFormat.processMillis(valVector.getLong(p), this.formatter)); + } + return result.build(); + } + } + + @Override + public String toString() { + return "DateFormatMillisConstantEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(val); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory val; + + private final DateFormatter formatter; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, + DateFormatter formatter) { + this.source = source; + this.val = val; + this.formatter = formatter; + } + + @Override + public DateFormatMillisConstantEvaluator get(DriverContext context) { + return new DateFormatMillisConstantEvaluator(source, val.get(context), formatter, context); + } + + @Override + public String toString() { + return "DateFormatMillisConstantEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatMillisEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatMillisEvaluator.java new file mode 100644 index 0000000000000..29da191dbe781 --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatMillisEvaluator.java @@ -0,0 +1,159 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.Locale; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link DateFormat}. + * This class is generated. Do not edit it. + */ +public final class DateFormatMillisEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final EvalOperator.ExpressionEvaluator val; + + private final EvalOperator.ExpressionEvaluator formatter; + + private final Locale locale; + + private final DriverContext driverContext; + + private Warnings warnings; + + public DateFormatMillisEvaluator(Source source, EvalOperator.ExpressionEvaluator val, + EvalOperator.ExpressionEvaluator formatter, Locale locale, DriverContext driverContext) { + this.source = source; + this.val = val; + this.formatter = formatter; + this.locale = locale; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock valBlock = (LongBlock) val.eval(page)) { + try (BytesRefBlock formatterBlock = (BytesRefBlock) formatter.eval(page)) { + LongVector valVector = valBlock.asVector(); + if (valVector == null) { + return eval(page.getPositionCount(), valBlock, formatterBlock); + } + BytesRefVector formatterVector = formatterBlock.asVector(); + if (formatterVector == null) { + return eval(page.getPositionCount(), valBlock, formatterBlock); + } + return eval(page.getPositionCount(), valVector, formatterVector).asBlock(); + } + } + } + + public BytesRefBlock eval(int positionCount, LongBlock valBlock, BytesRefBlock formatterBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef formatterScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (valBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (valBlock.getValueCount(p) != 1) { + if (valBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (formatterBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (formatterBlock.getValueCount(p) != 1) { + if (formatterBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue 
position; + } + result.appendBytesRef(DateFormat.processMillis(valBlock.getLong(valBlock.getFirstValueIndex(p)), formatterBlock.getBytesRef(formatterBlock.getFirstValueIndex(p), formatterScratch), this.locale)); + } + return result.build(); + } + } + + public BytesRefVector eval(int positionCount, LongVector valVector, + BytesRefVector formatterVector) { + try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef formatterScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(DateFormat.processMillis(valVector.getLong(p), formatterVector.getBytesRef(p, formatterScratch), this.locale)); + } + return result.build(); + } + } + + @Override + public String toString() { + return "DateFormatMillisEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(val, formatter); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory val; + + private final EvalOperator.ExpressionEvaluator.Factory formatter; + + private final Locale locale; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, + EvalOperator.ExpressionEvaluator.Factory formatter, Locale locale) { + this.source = source; + this.val = val; + this.formatter = formatter; + this.locale = locale; + } + + @Override + public DateFormatMillisEvaluator get(DriverContext context) { + return new DateFormatMillisEvaluator(source, val.get(context), formatter.get(context), locale, context); + } + + @Override + public String toString() { + return "DateFormatMillisEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosConstantEvaluator.java similarity index 83% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosConstantEvaluator.java index 25afa6bec360b..1488833227dcb 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosConstantEvaluator.java @@ -24,7 +24,7 @@ * {@link EvalOperator.ExpressionEvaluator} implementation for {@link DateFormat}. * This class is generated. Do not edit it. 
*/ -public final class DateFormatConstantEvaluator implements EvalOperator.ExpressionEvaluator { +public final class DateFormatNanosConstantEvaluator implements EvalOperator.ExpressionEvaluator { private final Source source; private final EvalOperator.ExpressionEvaluator val; @@ -35,7 +35,7 @@ public final class DateFormatConstantEvaluator implements EvalOperator.Expressio private Warnings warnings; - public DateFormatConstantEvaluator(Source source, EvalOperator.ExpressionEvaluator val, + public DateFormatNanosConstantEvaluator(Source source, EvalOperator.ExpressionEvaluator val, DateFormatter formatter, DriverContext driverContext) { this.source = source; this.val = val; @@ -68,7 +68,7 @@ public BytesRefBlock eval(int positionCount, LongBlock valBlock) { result.appendNull(); continue position; } - result.appendBytesRef(DateFormat.process(valBlock.getLong(valBlock.getFirstValueIndex(p)), this.formatter)); + result.appendBytesRef(DateFormat.processNanos(valBlock.getLong(valBlock.getFirstValueIndex(p)), this.formatter)); } return result.build(); } @@ -77,7 +77,7 @@ public BytesRefBlock eval(int positionCount, LongBlock valBlock) { public BytesRefVector eval(int positionCount, LongVector valVector) { try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { position: for (int p = 0; p < positionCount; p++) { - result.appendBytesRef(DateFormat.process(valVector.getLong(p), this.formatter)); + result.appendBytesRef(DateFormat.processNanos(valVector.getLong(p), this.formatter)); } return result.build(); } @@ -85,7 +85,7 @@ public BytesRefVector eval(int positionCount, LongVector valVector) { @Override public String toString() { - return "DateFormatConstantEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; + return "DateFormatNanosConstantEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; } @Override @@ -120,13 +120,13 @@ public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, } @Override - public DateFormatConstantEvaluator get(DriverContext context) { - return new DateFormatConstantEvaluator(source, val.get(context), formatter, context); + public DateFormatNanosConstantEvaluator get(DriverContext context) { + return new DateFormatNanosConstantEvaluator(source, val.get(context), formatter, context); } @Override public String toString() { - return "DateFormatConstantEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; + return "DateFormatNanosConstantEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; } } } diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosEvaluator.java similarity index 84% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosEvaluator.java index 318ffa5af8f77..a94d522014813 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosEvaluator.java @@ -25,7 +25,7 @@ * {@link EvalOperator.ExpressionEvaluator} implementation for {@link DateFormat}. 
* This class is generated. Do not edit it. */ -public final class DateFormatEvaluator implements EvalOperator.ExpressionEvaluator { +public final class DateFormatNanosEvaluator implements EvalOperator.ExpressionEvaluator { private final Source source; private final EvalOperator.ExpressionEvaluator val; @@ -38,7 +38,7 @@ public final class DateFormatEvaluator implements EvalOperator.ExpressionEvaluat private Warnings warnings; - public DateFormatEvaluator(Source source, EvalOperator.ExpressionEvaluator val, + public DateFormatNanosEvaluator(Source source, EvalOperator.ExpressionEvaluator val, EvalOperator.ExpressionEvaluator formatter, Locale locale, DriverContext driverContext) { this.source = source; this.val = val; @@ -90,7 +90,7 @@ public BytesRefBlock eval(int positionCount, LongBlock valBlock, BytesRefBlock f result.appendNull(); continue position; } - result.appendBytesRef(DateFormat.process(valBlock.getLong(valBlock.getFirstValueIndex(p)), formatterBlock.getBytesRef(formatterBlock.getFirstValueIndex(p), formatterScratch), this.locale)); + result.appendBytesRef(DateFormat.processNanos(valBlock.getLong(valBlock.getFirstValueIndex(p)), formatterBlock.getBytesRef(formatterBlock.getFirstValueIndex(p), formatterScratch), this.locale)); } return result.build(); } @@ -101,7 +101,7 @@ public BytesRefVector eval(int positionCount, LongVector valVector, try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { BytesRef formatterScratch = new BytesRef(); position: for (int p = 0; p < positionCount; p++) { - result.appendBytesRef(DateFormat.process(valVector.getLong(p), formatterVector.getBytesRef(p, formatterScratch), this.locale)); + result.appendBytesRef(DateFormat.processNanos(valVector.getLong(p), formatterVector.getBytesRef(p, formatterScratch), this.locale)); } return result.build(); } @@ -109,7 +109,7 @@ public BytesRefVector eval(int positionCount, LongVector valVector, @Override public String toString() { - return "DateFormatEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]"; + return "DateFormatNanosEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]"; } @Override @@ -147,13 +147,13 @@ public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, } @Override - public DateFormatEvaluator get(DriverContext context) { - return new DateFormatEvaluator(source, val.get(context), formatter.get(context), locale, context); + public DateFormatNanosEvaluator get(DriverContext context) { + return new DateFormatNanosEvaluator(source, val.get(context), formatter.get(context), locale, context); } @Override public String toString() { - return "DateFormatEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]"; + return "DateFormatNanosEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]"; } } } diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/util/DelayEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/util/DelayEvaluator.java deleted file mode 100644 index 0db714eceb285..0000000000000 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/util/DelayEvaluator.java +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
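The Millis/Nanos split in these renames is the substance of the change: the evaluators receive the same long column either way, and only the interpretation (epoch milliseconds vs. epoch nanoseconds) differs. A small sketch of that difference using DateFormatter, assuming its formatMillis/formatNanos helpers, which the dateTimeToString and nanoTimeToString converter methods wrap:

import org.elasticsearch.common.time.DateFormatter;

class MillisVsNanos {
    public static void main(String[] args) {
        DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS");
        long millis = 1_700_000_000_123L;        // epoch milliseconds
        long nanos = 1_700_000_000_123_456_789L; // the same instant, as epoch nanoseconds
        System.out.println(formatter.formatMillis(millis)); // sub-millisecond digits are zero
        System.out.println(formatter.formatNanos(nanos));   // keeps the full nanosecond precision
    }
}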
Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.xpack.esql.expression.function.scalar.util; - -import java.lang.Override; -import java.lang.String; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.EvalOperator; -import org.elasticsearch.compute.operator.Warnings; -import org.elasticsearch.xpack.esql.core.tree.Source; - -/** - * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Delay}. - * This class is generated. Do not edit it. - */ -public final class DelayEvaluator implements EvalOperator.ExpressionEvaluator { - private final Source source; - - private final long ms; - - private final DriverContext driverContext; - - private Warnings warnings; - - public DelayEvaluator(Source source, long ms, DriverContext driverContext) { - this.source = source; - this.ms = ms; - this.driverContext = driverContext; - } - - @Override - public Block eval(Page page) { - return eval(page.getPositionCount()).asBlock(); - } - - public BooleanVector eval(int positionCount) { - try(BooleanVector.FixedBuilder result = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { - position: for (int p = 0; p < positionCount; p++) { - result.appendBoolean(p, Delay.process(this.ms)); - } - return result.build(); - } - } - - @Override - public String toString() { - return "DelayEvaluator[" + "ms=" + ms + "]"; - } - - @Override - public void close() { - } - - private Warnings warnings() { - if (warnings == null) { - this.warnings = Warnings.createWarnings( - driverContext.warningsMode(), - source.source().getLineNumber(), - source.source().getColumnNumber(), - source.text() - ); - } - return warnings; - } - - static class Factory implements EvalOperator.ExpressionEvaluator.Factory { - private final Source source; - - private final long ms; - - public Factory(Source source, long ms) { - this.source = source; - this.ms = ms; - } - - @Override - public DelayEvaluator get(DriverContext context) { - return new DelayEvaluator(source, ms, context); - } - - @Override - public String toString() { - return "DelayEvaluator[" + "ms=" + ms + "]"; - } - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 4531532fac3c5..5468d57392c2e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -27,6 +27,113 @@ */ public class EsqlCapabilities { public enum Cap { + /** + * Introduction of {@code MV_SORT}, {@code MV_SLICE}, and {@code MV_ZIP}. + * Added in #106095. + */ + MV_SORT, + + /** + * When we disabled some broken optimizations around {@code nullable}. + * Fixed in #105691. + */ + DISABLE_NULLABLE_OPTS, + + /** + * Introduction of {@code ST_X} and {@code ST_Y}. Added in #105768. + */ + ST_X_Y, + + /** + * Changed precision of {@code geo_point} and {@code cartesian_point} fields, by loading from source into WKB. Done in #103691. + */ + SPATIAL_POINTS_FROM_SOURCE, + + /** + * Support for loading {@code geo_shape} and {@code cartesian_shape} fields. Done in #104269. 
+ */ + SPATIAL_SHAPES, + + /** + * Support for spatial aggregation {@code ST_CENTROID}. Done in #104269. + */ + ST_CENTROID_AGG, + + /** + * Support for spatial aggregation {@code ST_INTERSECTS}. Done in #104907. + */ + ST_INTERSECTS, + + /** + * Support for spatial aggregation {@code ST_CONTAINS} and {@code ST_WITHIN}. Done in #106503. + */ + ST_CONTAINS_WITHIN, + + /** + * Support for spatial aggregation {@code ST_DISJOINT}. Done in #107007. + */ + ST_DISJOINT, + + /** + * The introduction of the {@code VALUES} agg. + */ + AGG_VALUES, + + /** + * Does ESQL support async queries. + */ + ASYNC_QUERY, + + /** + * Does ESQL support FROM OPTIONS? + */ + @Deprecated + FROM_OPTIONS, + + /** + * Cast string literals to a desired data type. + */ + STRING_LITERAL_AUTO_CASTING, + + /** + * Base64 encoding and decoding functions. + */ + BASE64_DECODE_ENCODE, + + /** + * Support for the :: casting operator + */ + CASTING_OPERATOR, + + /** + * Blocks can be labelled with {@link org.elasticsearch.compute.data.Block.MvOrdering#SORTED_ASCENDING} for optimizations. + */ + MV_ORDERING_SORTED_ASCENDING, + + /** + * Support for metrics counter fields + */ + METRICS_COUNTER_FIELDS, + + /** + * Cast string literals to a desired data type for IN predicate and more types for BinaryComparison. + */ + STRING_LITERAL_AUTO_CASTING_EXTENDED, + + /** + * Support for metadata fields. + */ + METADATA_FIELDS, + + /** + * Support for timespan units abbreviations + */ + TIMESPAN_ABBREVIATIONS, + + /** + * Support metrics counter types + */ + COUNTER_TYPES, /** * Support for function {@code BIT_LENGTH}. Done in #115792 @@ -384,6 +491,10 @@ public enum Cap { * Support the {@link org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In} operator for date nanos */ DATE_NANOS_IN_OPERATOR(), + /** + * Support running date format function on nanosecond dates + */ + DATE_NANOS_DATE_FORMAT(), /** * DATE_PARSE supports reading timezones @@ -569,7 +680,7 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V10(Build.current().isSnapshot()), + JOIN_LOOKUP_V11(Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java index fb7e0f651458c..3d38b697dc5be 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.Locale; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import static org.elasticsearch.xpack.esql.formatter.TextFormat.CSV; import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @@ -188,29 +189,26 @@ public ActionListener wrapWithLogging() { if (LOGGER.isDebugEnabled() == false) { return listener; } + Consumer logger = response -> LOGGER.debug( + "ESQL query execution {}.\nQuery string or async ID: [{}]\nExecution time: {}ms", + response == null ? "failed" : "finished", + esqlQueryOrId, + getTook(response, TimeUnit.MILLISECONDS) + ); return ActionListener.wrap(r -> { listener.onResponse(r); - // At this point, the StopWatch should already have been stopped, so we log a consistent time. 
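The EsqlResponseListener refactor in this hunk collapses the duplicated success and failure log statements into one Consumer<EsqlQueryResponse>, with null standing in for failure (the surviving comment notes that getTook(null, ...) stops the timer manually). A minimal sketch of the shape, with a hypothetical Response type parameter standing in:

import java.util.function.Consumer;
import org.elasticsearch.action.ActionListener;

static <R> ActionListener<R> withOutcomeLog(ActionListener<R> listener, Consumer<R> logOutcome) {
    return ActionListener.wrap(r -> {
        listener.onResponse(r);  // respond first, then log with the stopped timer
        logOutcome.accept(r);
    }, ex -> {
        logOutcome.accept(null); // null doubles as the failure marker
        listener.onFailure(ex);
    });
}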
- LOGGER.debug( - "Finished execution of ESQL query.\nQuery string or async ID: [{}]\nExecution time: [{}]ms", - esqlQueryOrId, - getTook(r, TimeUnit.MILLISECONDS) - ); + logger.accept(r); }, ex -> { // In case of failure, stop the time manually before sending out the response. - long timeMillis = getTook(null, TimeUnit.MILLISECONDS); - LOGGER.debug( - "Failed execution of ESQL query.\nQuery string or async ID: [{}]\nExecution time: [{}]ms", - esqlQueryOrId, - timeMillis - ); + logger.accept(null); listener.onFailure(ex); }); } static void logOnFailure(Throwable throwable) { RestStatus status = ExceptionsHelper.status(throwable); - LOGGER.log(status.getStatus() >= 500 ? Level.WARN : Level.DEBUG, () -> "Request failed with status [" + status + "]: ", throwable); + var level = status.getStatus() >= 500 ? Level.WARN : Level.DEBUG; + LOGGER.log(level, () -> "ESQL request failed with status [" + status + "]: ", throwable); } /* diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java index bd2f8eb38f96f..0fd35bc3c455a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -46,21 +45,9 @@ public Set supportedCapabilities() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = RequestXContent.parseAsync(parser); + return RestEsqlQueryAction.restChannelConsumer(RequestXContent.parseAsync(parser), request, client); } - - LOGGER.debug("Beginning execution of ESQL async query.\nQuery string: [{}]", esqlRequest.query()); - - return channel -> { - RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancellableClient.execute( - EsqlQueryAction.INSTANCE, - esqlRequest, - new EsqlResponseListener(channel, request, esqlRequest).wrapWithLogging() - ); - }; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index 7f5adc310a535..ebe51cc2ab4e3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -45,11 +45,12 @@ public Set supportedCapabilities() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = RequestXContent.parseSync(parser); + return restChannelConsumer(RequestXContent.parseSync(parser), request, client); } + } + protected static RestChannelConsumer restChannelConsumer(EsqlQueryRequest esqlRequest, RestRequest request, NodeClient client) { LOGGER.debug("Beginning execution 
of ESQL query.\nQuery string: [{}]", esqlRequest.query()); return channel -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java index 74c66c0d1b338..a486d574ddd84 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -22,13 +22,11 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntVector; @@ -41,6 +39,7 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OutputOperator; +import org.elasticsearch.compute.operator.ProjectOperator; import org.elasticsearch.compute.operator.Warnings; import org.elasticsearch.compute.operator.lookup.EnrichQuerySourceOperator; import org.elasticsearch.compute.operator.lookup.MergePositionsOperator; @@ -87,19 +86,19 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; /** - * {@link AbstractLookupService} performs a single valued {@code LEFT JOIN} for a - * given input page against another index. This is quite similar to a nested loop - * join. It is restricted to indices with only a single shard. + * {@link AbstractLookupService} performs a {@code LEFT JOIN} for a given input + * page against another index that must have only a single + * shard. *

 * This registers a {@link TransportRequestHandler} so we can handle requests
 * to join data that isn't local to the node, but it is much faster if the
 * data is already local.
@@ -107,7 +106,7 @@
 *
 * The join process spawns a {@link Driver} per incoming page which runs in
- * three stages:
+ * two or three stages:
 *
* Stage 1: Finding matching document IDs for the input page. This stage is done @@ -120,9 +119,9 @@ * {@code [DocVector, IntBlock: positions, Block: field1, Block: field2,...]}. *

- * Stage 3: Combining the extracted values based on positions and filling nulls for
- * positions without matches. This is done by {@link MergePositionsOperator}. The output
- * page is represented as {@code [Block: field1, Block: field2,...]}.
+ * Stage 3: Optionally, this combines the extracted values based on positions, filling in
+ * nulls for positions without matches. This is done by {@link MergePositionsOperator}.
+ * The output page is represented as {@code [Block: field1, Block: field2,...]}.
 *
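A concrete, stand-alone model of the merging contract described above may help: one output row per input position, extracted values where the lookup matched, null where it did not (plain Java over a map, not the block-based operators):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class LeftJoinSketch {
    /** One output value per input position: the matched value, or null (stage 3's null filling). */
    static List<String> leftJoin(List<String> inputTerms, Map<String, String> lookupIndex) {
        List<String> out = new ArrayList<>(inputTerms.size());
        for (String term : inputTerms) {
            out.add(lookupIndex.get(term)); // null when nothing matched
        }
        return out; // out.size() == inputTerms.size(), mirroring the output Page's position count
    }

    public static void main(String[] args) {
        System.out.println(leftJoin(List.of("a", "b", "c"), Map.of("a", "1", "c", "3"))); // [1, null, 3]
    }
}

The non-merging path introduced in this change skips that null-filling stage and instead streams every match through, which is why the lookup response becomes a list of pages further down.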

* The {@link Page#getPositionCount()} of the output {@link Page} is equal to the @@ -139,6 +138,15 @@ abstract class AbstractLookupService readRequest ) { this.actionName = actionName; @@ -157,6 +166,7 @@ abstract class AbstractLookupService resultPages, BlockFactory blockFactory) throws IOException; + + /** + * Read the response from a {@link StreamInput}. + */ + protected abstract LookupResponse readLookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException; + protected static QueryList termQueryList( MappedFieldType field, SearchExecutionContext searchExecutionContext, @@ -196,9 +216,9 @@ protected static QueryList termQueryList( /** * Perform the actual lookup. */ - public final void lookupAsync(R request, CancellableTask parentTask, ActionListener outListener) { + public final void lookupAsync(R request, CancellableTask parentTask, ActionListener> outListener) { ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - ActionListener listener = ContextPreservingActionListener.wrapPreservingContext(outListener, threadContext); + ActionListener> listener = ContextPreservingActionListener.wrapPreservingContext(outListener, threadContext); hasPrivilege(listener.delegateFailureAndWrap((delegate, ignored) -> { ClusterState clusterState = clusterService.state(); GroupShardsIterator shardIterators = clusterService.operationRouting() @@ -225,8 +245,8 @@ public final void lookupAsync(R request, CancellableTask parentTask, ActionListe parentTask, TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>( - delegate.map(LookupResponse::takePage), - in -> new LookupResponse(in, blockFactory), + delegate.map(LookupResponse::takePages), + in -> readLookupResponse(in, blockFactory), executor ) ); @@ -291,10 +311,13 @@ private void hasPrivilege(ActionListener outListener) { ); } - private void doLookup(T request, CancellableTask task, ActionListener listener) { + private void doLookup(T request, CancellableTask task, ActionListener> listener) { Block inputBlock = request.inputPage.getBlock(0); if (inputBlock.areAllValuesNull()) { - listener.onResponse(createNullResponse(request.inputPage.getPositionCount(), request.extractFields)); + List nullResponse = mergePages + ? List.of(createNullResponse(request.inputPage.getPositionCount(), request.extractFields)) + : List.of(); + listener.onResponse(nullResponse); return; } final List releasables = new ArrayList<>(6); @@ -315,31 +338,31 @@ private void doLookup(T request, CancellableTask task, ActionListener list mergingTypes[i] = PlannerUtils.toElementType(request.extractFields.get(i).dataType()); } final int[] mergingChannels = IntStream.range(0, request.extractFields.size()).map(i -> i + 2).toArray(); - final MergePositionsOperator mergePositionsOperator; + final Operator finishPages; final OrdinalBytesRefBlock ordinalsBytesRefBlock; - if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { + if (mergePages // TODO fix this optimization for Lookup. 
+ && inputBlock instanceof BytesRefBlock bytesRefBlock + && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { + inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); var selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); - mergePositionsOperator = new MergePositionsOperator( - 1, - mergingChannels, - mergingTypes, - selectedPositions, - driverContext.blockFactory() - ); - + finishPages = new MergePositionsOperator(1, mergingChannels, mergingTypes, selectedPositions, driverContext.blockFactory()); } else { - try (var selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock()) { - mergePositionsOperator = new MergePositionsOperator( - 1, - mergingChannels, - mergingTypes, - selectedPositions, - driverContext.blockFactory() - ); + if (mergePages) { + try (var selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock()) { + finishPages = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + } + } else { + finishPages = dropDocBlockOperator(request.extractFields); } } - releasables.add(mergePositionsOperator); + releasables.add(finishPages); SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); QueryList queryList = queryList(request, searchExecutionContext, inputBlock, request.inputDataType); var warnings = Warnings.createWarnings( @@ -359,8 +382,15 @@ private void doLookup(T request, CancellableTask task, ActionListener list var extractFieldsOperator = extractFieldsOperator(searchContext, driverContext, request.extractFields); releasables.add(extractFieldsOperator); - AtomicReference result = new AtomicReference<>(); - OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); + /* + * Collect all result Pages in a synchronizedList mostly out of paranoia. We'll + * be collecting these results in the Driver thread and reading them in its + * completion listener which absolutely happens-after the insertions. So, + * technically, we don't need synchronization here. But we're doing it anyway + * because the list will never grow mega large. 
+ */ + List collectedPages = Collections.synchronizedList(new ArrayList<>()); + OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), collectedPages::add); releasables.add(outputOperator); Driver driver = new Driver( "enrich-lookup:" + request.sessionId, @@ -369,7 +399,7 @@ private void doLookup(T request, CancellableTask task, ActionListener list driverContext, request::toString, queryOperator, - List.of(extractFieldsOperator, mergePositionsOperator), + List.of(extractFieldsOperator, finishPages), outputOperator, Driver.DEFAULT_STATUS_INTERVAL, Releasables.wrap(searchContext, localBreaker) @@ -380,9 +410,9 @@ private void doLookup(T request, CancellableTask task, ActionListener list }); var threadContext = transportService.getThreadPool().getThreadContext(); Driver.start(threadContext, executor, driver, Driver.DEFAULT_MAX_ITERATIONS, listener.map(ignored -> { - Page out = result.get(); - if (out == null) { - out = createNullResponse(request.inputPage.getPositionCount(), request.extractFields); + List out = collectedPages; + if (mergePages && out.isEmpty()) { + out = List.of(createNullResponse(request.inputPage.getPositionCount(), request.extractFields)); } return out; })); @@ -434,6 +464,18 @@ private static Operator extractFieldsOperator( ); } + /** + * Drop just the first block, keeping the remaining. + */ + private Operator dropDocBlockOperator(List extractFields) { + int end = extractFields.size() + 1; + List projection = new ArrayList<>(end); + for (int i = 1; i <= end; i++) { + projection.add(i); + } + return new ProjectOperator(projection); + } + private Page createNullResponse(int positionCount, List extractFields) { final Block[] blocks = new Block[extractFields.size()]; try { @@ -457,7 +499,7 @@ public void messageReceived(T request, TransportChannel channel, Task task) { request, (CancellableTask) task, listener.delegateFailureAndWrap( - (l, outPage) -> ActionListener.respondAndRelease(l, new LookupResponse(outPage, blockFactory)) + (l, resultPages) -> ActionListener.respondAndRelease(l, createLookupResponse(resultPages, blockFactory)) ) ); } @@ -587,45 +629,24 @@ public final String toString() { protected abstract String extraDescription(); } - private static class LookupResponse extends TransportResponse { - private final RefCounted refs = AbstractRefCounted.of(this::releasePage); - private final BlockFactory blockFactory; - private Page page; - private long reservedBytes = 0; + abstract static class LookupResponse extends TransportResponse { + private final RefCounted refs = AbstractRefCounted.of(this::release); + protected final BlockFactory blockFactory; + protected long reservedBytes = 0; - LookupResponse(Page page, BlockFactory blockFactory) { - this.page = page; - this.blockFactory = blockFactory; - } - - LookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { - try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { - this.page = new Page(bsi); - } + LookupResponse(BlockFactory blockFactory) { this.blockFactory = blockFactory; } - @Override - public void writeTo(StreamOutput out) throws IOException { - long bytes = page.ramBytesUsedByBlocks(); - blockFactory.breaker().addEstimateBytesAndMaybeBreak(bytes, "serialize enrich lookup response"); - reservedBytes += bytes; - page.writeTo(out); - } - - Page takePage() { - var p = page; - page = null; - return p; - } + protected abstract List takePages(); - private void releasePage() { + private void release() { 
blockFactory.breaker().addWithoutBreaking(-reservedBytes); - if (page != null) { - Releasables.closeExpectNoException(page::releaseBlocks); - } + innerRelease(); } + protected abstract void innerRelease(); + @Override public void incRef() { refs.incRef(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index df608a04632a2..8083d67e5a19d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -17,6 +17,7 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.ResponseHeadersCollector; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; @@ -27,7 +28,7 @@ import java.util.List; import java.util.Objects; -public final class EnrichLookupOperator extends AsyncOperator { +public final class EnrichLookupOperator extends AsyncOperator { private final EnrichLookupService enrichLookupService; private final String sessionId; private final CancellableTask parentTask; @@ -128,13 +129,29 @@ protected void performAsync(Page inputPage, ActionListener listener) { enrichFields, source ); + CheckedFunction, Page, Exception> handleResponse = pages -> { + if (pages.size() != 1) { + throw new UnsupportedOperationException("ENRICH should only return a single page"); + } + return inputPage.appendPage(pages.getFirst()); + }; enrichLookupService.lookupAsync( request, parentTask, - ActionListener.runBefore(listener.map(inputPage::appendPage), responseHeadersCollector::collect) + ActionListener.runBefore(listener.map(handleResponse), responseHeadersCollector::collect) ); } + @Override + public Page getOutput() { + return fetchFromBuffer(); + } + + @Override + protected void releaseFetchedOnAnyThread(Page page) { + releasePageOnAnyThread(page); + } + @Override public String toString() { return "EnrichOperator[index=" diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 7057b586871eb..e3d962fa9231b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -17,6 +17,7 @@ import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.lookup.QueryList; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.mapper.RangeType; @@ -52,7 +53,16 @@ public EnrichLookupService( BigArrays bigArrays, BlockFactory blockFactory ) { - super(LOOKUP_ACTION_NAME, clusterService, searchService, transportService, bigArrays, blockFactory, TransportRequest::readFrom); + super( + LOOKUP_ACTION_NAME, + clusterService, + searchService, + transportService, + bigArrays, + blockFactory, + true, + TransportRequest::readFrom + ); } @Override @@ -86,6 +96,19 @@ protected String 
getRequiredPrivilege() { return ClusterPrivilegeResolver.MONITOR_ENRICH.name(); } + @Override + protected LookupResponse createLookupResponse(List pages, BlockFactory blockFactory) throws IOException { + if (pages.size() != 1) { + throw new UnsupportedOperationException("ENRICH always makes a single page of output"); + } + return new LookupResponse(pages.getFirst(), blockFactory); + } + + @Override + protected LookupResponse readLookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { + return new LookupResponse(in, blockFactory); + } + private static void validateTypes(DataType inputDataType, MappedFieldType fieldType) { if (fieldType instanceof RangeFieldMapper.RangeFieldType rangeType) { // For range policy types, the ENRICH index field type will be one of a list of supported range types, @@ -210,4 +233,42 @@ protected String extraDescription() { return " ,match_type=" + matchType + " ,match_field=" + matchField; } } + + private static class LookupResponse extends AbstractLookupService.LookupResponse { + private Page page; + + private LookupResponse(Page page, BlockFactory blockFactory) { + super(blockFactory); + this.page = page; + } + + private LookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { + super(blockFactory); + try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { + this.page = new Page(bsi); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + long bytes = page.ramBytesUsedByBlocks(); + blockFactory.breaker().addEstimateBytesAndMaybeBreak(bytes, "serialize enrich lookup response"); + reservedBytes += bytes; + page.writeTo(out); + } + + @Override + protected List takePages() { + var p = List.of(page); + page = null; + return p; + } + + @Override + protected void innerRelease() { + if (page != null) { + Releasables.closeExpectNoException(page::releaseBlocks); + } + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java index f09f7d0e23e7b..73dfcf8d43620 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.enrich; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -15,7 +16,11 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AsyncOperator; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.lookup.RightChunkedLeftJoin; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; @@ -23,11 +28,13 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Objects; +import java.util.Optional; // TODO rename 
package -public final class LookupFromIndexOperator extends AsyncOperator { +public final class LookupFromIndexOperator extends AsyncOperator { public record Factory( String sessionId, CancellableTask parentTask, @@ -81,6 +88,14 @@ public Operator get(DriverContext driverContext) { private final List loadFields; private final Source source; private long totalTerms = 0L; + /** + * Total number of pages emitted by this {@link Operator}. + */ + private long emittedPages = 0L; + /** + * The ongoing join or {@code null} none is ongoing at the moment. + */ + private OngoingJoin ongoing = null; public LookupFromIndexOperator( String sessionId, @@ -108,7 +123,7 @@ public LookupFromIndexOperator( } @Override - protected void performAsync(Page inputPage, ActionListener listener) { + protected void performAsync(Page inputPage, ActionListener listener) { final Block inputBlock = inputPage.getBlock(inputChannel); totalTerms += inputBlock.getTotalValueCount(); LookupFromIndexService.Request request = new LookupFromIndexService.Request( @@ -120,7 +135,47 @@ protected void performAsync(Page inputPage, ActionListener listener) { loadFields, source ); - lookupService.lookupAsync(request, parentTask, listener.map(inputPage::appendPage)); + lookupService.lookupAsync( + request, + parentTask, + listener.map(pages -> new OngoingJoin(new RightChunkedLeftJoin(inputPage, loadFields.size()), pages.iterator())) + ); + } + + @Override + public Page getOutput() { + if (ongoing == null) { + // No ongoing join, start a new one if we can. + ongoing = fetchFromBuffer(); + if (ongoing == null) { + // Buffer empty, wait for the next time we're called. + return null; + } + } + if (ongoing.itr.hasNext()) { + // There's more to do in the ongoing join. + Page right = ongoing.itr.next(); + emittedPages++; + try { + return ongoing.join.join(right); + } finally { + right.releaseBlocks(); + } + } + // Current join is all done. Emit any trailing unmatched rows. + Optional remaining = ongoing.join.noMoreRightHandPages(); + ongoing.close(); + ongoing = null; + if (remaining.isEmpty()) { + return null; + } + emittedPages++; + return remaining.get(); + } + + @Override + protected void releaseFetchedOnAnyThread(OngoingJoin ongoingJoin) { + ongoingJoin.releaseOnAnyThread(); } @Override @@ -138,15 +193,29 @@ public String toString() { + "]"; } + @Override + public boolean isFinished() { + return ongoing == null && super.isFinished(); + } + + @Override + public IsBlockedResult isBlocked() { + if (ongoing != null) { + return NOT_BLOCKED; + } + return super.isBlocked(); + } + @Override protected void doClose() { // TODO: Maybe create a sub-task as the parent task of all the lookup tasks // then cancel it when this operator terminates early (e.g., have enough result). + Releasables.close(ongoing); } @Override protected Operator.Status status(long receivedPages, long completedPages, long totalTimeInMillis) { - return new LookupFromIndexOperator.Status(receivedPages, completedPages, totalTimeInMillis, totalTerms); + return new LookupFromIndexOperator.Status(receivedPages, completedPages, totalTimeInMillis, totalTerms, emittedPages); } public static class Status extends AsyncOperator.Status { @@ -156,22 +225,29 @@ public static class Status extends AsyncOperator.Status { Status::new ); - final long totalTerms; + private final long totalTerms; + /** + * Total number of pages emitted by this {@link Operator}. 
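+ * One lookup round can push this past {@code receivedPages}: the right-hand side
+ * arrives in chunks that are each joined against the retained input page.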
+ */ + private final long emittedPages; - Status(long receivedPages, long completedPages, long totalTimeInMillis, long totalTerms) { + Status(long receivedPages, long completedPages, long totalTimeInMillis, long totalTerms, long emittedPages) { super(receivedPages, completedPages, totalTimeInMillis); this.totalTerms = totalTerms; + this.emittedPages = emittedPages; } Status(StreamInput in) throws IOException { super(in); this.totalTerms = in.readVLong(); + this.emittedPages = in.readVLong(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(totalTerms); + out.writeVLong(emittedPages); } @Override @@ -179,11 +255,20 @@ public String getWriteableName() { return ENTRY.name; } + public long emittedPages() { + return emittedPages; + } + + public long totalTerms() { + return totalTerms; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - innerToXContent(builder); - builder.field("total_terms", totalTerms); + super.innerToXContent(builder); + builder.field("emitted_pages", emittedPages()); + builder.field("total_terms", totalTerms()); return builder.endObject(); } @@ -196,12 +281,26 @@ public boolean equals(Object o) { return false; } Status status = (Status) o; - return totalTerms == status.totalTerms; + return totalTerms == status.totalTerms && emittedPages == status.emittedPages; } @Override public int hashCode() { - return Objects.hash(super.hashCode(), totalTerms); + return Objects.hash(super.hashCode(), totalTerms, emittedPages); + } + } + + protected record OngoingJoin(RightChunkedLeftJoin join, Iterator itr) implements Releasable { + @Override + public void close() { + Releasables.close(join, Releasables.wrap(() -> Iterators.map(itr, page -> page::releaseBlocks))); + } + + public void releaseOnAnyThread() { + Releasables.close( + join::releaseOnAnyThread, + Releasables.wrap(() -> Iterators.map(itr, page -> () -> releasePageOnAnyThread(page))) + ); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java index 0bbfc6dd0ce99..ad65394fdfbde 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java @@ -9,6 +9,7 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BigArrays; @@ -17,6 +18,7 @@ import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.lookup.QueryList; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; @@ -33,6 +35,7 @@ import java.io.IOException; import java.util.List; +import java.util.Objects; /** * {@link LookupFromIndexService} performs lookup against a Lookup index for @@ -49,7 +52,16 @@ public LookupFromIndexService( BigArrays bigArrays, BlockFactory blockFactory ) { - super(LOOKUP_ACTION_NAME, clusterService, searchService, transportService, 
bigArrays, blockFactory, TransportRequest::readFrom); + super( + LOOKUP_ACTION_NAME, + clusterService, + searchService, + transportService, + bigArrays, + blockFactory, + false, + TransportRequest::readFrom + ); } @Override @@ -73,6 +85,16 @@ protected QueryList queryList(TransportRequest request, SearchExecutionContext c return termQueryList(fieldType, context, inputBlock, inputDataType); } + @Override + protected LookupResponse createLookupResponse(List pages, BlockFactory blockFactory) throws IOException { + return new LookupResponse(pages, blockFactory); + } + + @Override + protected AbstractLookupService.LookupResponse readLookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { + return new LookupResponse(in, blockFactory); + } + @Override protected String getRequiredPrivilege() { return null; @@ -171,4 +193,65 @@ protected String extraDescription() { return " ,match_field=" + matchField; } } + + protected static class LookupResponse extends AbstractLookupService.LookupResponse { + private List pages; + + LookupResponse(List pages, BlockFactory blockFactory) { + super(blockFactory); + this.pages = pages; + } + + LookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { + super(blockFactory); + try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { + this.pages = bsi.readCollectionAsList(Page::new); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + long bytes = pages.stream().mapToLong(Page::ramBytesUsedByBlocks).sum(); + blockFactory.breaker().addEstimateBytesAndMaybeBreak(bytes, "serialize lookup join response"); + reservedBytes += bytes; + out.writeCollection(pages); + } + + @Override + protected List takePages() { + var p = pages; + pages = null; + return p; + } + + List pages() { + return pages; + } + + @Override + protected void innerRelease() { + if (pages != null) { + Releasables.closeExpectNoException(Releasables.wrap(Iterators.map(pages.iterator(), page -> page::releaseBlocks))); + } + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + LookupResponse that = (LookupResponse) o; + return Objects.equals(pages, that.pages); + } + + @Override + public int hashCode() { + return Objects.hashCode(pages); + } + + @Override + public String toString() { + return "LookupResponse{pages=" + pages + '}'; + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java index 29648d55cadd8..d30e99794a44e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java @@ -14,8 +14,10 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -33,10 +35,11 @@ import 
static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isDate; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.DEFAULT_DATE_TIME_FORMATTER; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.nanoTimeToString; public class DateFormat extends EsqlConfigurationFunction implements OptionalArgument { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( @@ -55,10 +58,14 @@ public class DateFormat extends EsqlConfigurationFunction implements OptionalArg ) public DateFormat( Source source, - @Param(optional = true, name = "dateFormat", type = { "keyword", "text", "date" }, description = """ + @Param(optional = true, name = "dateFormat", type = { "keyword", "text", "date", "date_nanos" }, description = """ Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`.""") Expression format, - @Param(name = "date", type = { "date" }, description = "Date expression. If `null`, the function returns `null`.") Expression date, + @Param( + name = "date", + type = { "date", "date_nanos" }, + description = "Date expression. If `null`, the function returns `null`." + ) Expression date, Configuration configuration ) { super(source, date != null ? List.of(format, date) : List.of(format), configuration); @@ -114,7 +121,9 @@ protected TypeResolution resolveType() { } } - resolution = isDate(field, sourceText(), format == null ? FIRST : SECOND); + String operationName = sourceText(); + TypeResolutions.ParamOrdinal paramOrd = format == null ? 
FIRST : SECOND; + resolution = TypeResolutions.isType(field, DataType::isDate, operationName, paramOrd, "datetime or date_nanos"); if (resolution.unresolved()) { return resolution; } @@ -127,31 +136,63 @@ public boolean foldable() { return field.foldable() && (format == null || format.foldable()); } - @Evaluator(extraName = "Constant") - static BytesRef process(long val, @Fixed DateFormatter formatter) { + @Evaluator(extraName = "MillisConstant") + static BytesRef processMillis(long val, @Fixed DateFormatter formatter) { return new BytesRef(dateTimeToString(val, formatter)); } - @Evaluator - static BytesRef process(long val, BytesRef formatter, @Fixed Locale locale) { + @Evaluator(extraName = "Millis") + static BytesRef processMillis(long val, BytesRef formatter, @Fixed Locale locale) { return new BytesRef(dateTimeToString(val, toFormatter(formatter, locale))); } + @Evaluator(extraName = "NanosConstant") + static BytesRef processNanos(long val, @Fixed DateFormatter formatter) { + return new BytesRef(nanoTimeToString(val, formatter)); + } + + @Evaluator(extraName = "Nanos") + static BytesRef processNanos(long val, BytesRef formatter, @Fixed Locale locale) { + return new BytesRef(nanoTimeToString(val, toFormatter(formatter, locale))); + } + + private ExpressionEvaluator.Factory getConstantEvaluator( + DataType dateType, + EvalOperator.ExpressionEvaluator.Factory fieldEvaluator, + DateFormatter formatter + ) { + if (dateType == DATE_NANOS) { + return new DateFormatNanosConstantEvaluator.Factory(source(), fieldEvaluator, formatter); + } + return new DateFormatMillisConstantEvaluator.Factory(source(), fieldEvaluator, formatter); + } + + private ExpressionEvaluator.Factory getEvaluator( + DataType dateType, + EvalOperator.ExpressionEvaluator.Factory fieldEvaluator, + EvalOperator.ExpressionEvaluator.Factory formatEvaluator + ) { + if (dateType == DATE_NANOS) { + return new DateFormatNanosEvaluator.Factory(source(), fieldEvaluator, formatEvaluator, configuration().locale()); + } + return new DateFormatMillisEvaluator.Factory(source(), fieldEvaluator, formatEvaluator, configuration().locale()); + } + @Override public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { var fieldEvaluator = toEvaluator.apply(field); if (format == null) { - return new DateFormatConstantEvaluator.Factory(source(), fieldEvaluator, DEFAULT_DATE_TIME_FORMATTER); + return getConstantEvaluator(field().dataType(), fieldEvaluator, DEFAULT_DATE_TIME_FORMATTER); } if (DataType.isString(format.dataType()) == false) { throw new IllegalArgumentException("unsupported data type for format [" + format.dataType() + "]"); } if (format.foldable()) { DateFormatter formatter = toFormatter(format.fold(toEvaluator.foldCtx()), configuration().locale()); - return new DateFormatConstantEvaluator.Factory(source(), fieldEvaluator, formatter); + return getConstantEvaluator(field.dataType(), fieldEvaluator, formatter); } var formatEvaluator = toEvaluator.apply(format); - return new DateFormatEvaluator.Factory(source(), fieldEvaluator, formatEvaluator, configuration().locale()); + return getEvaluator(field().dataType(), fieldEvaluator, formatEvaluator); } private static DateFormatter toFormatter(Object format, Locale locale) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/util/Delay.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/util/Delay.java index 3b17133bf4974..712dff3024de9 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/util/Delay.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/util/Delay.java @@ -10,8 +10,9 @@ import org.elasticsearch.Build; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.compute.ann.Evaluator; -import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FoldContext; @@ -102,21 +103,42 @@ private long msValue(FoldContext ctx) { @Override public ExpressionEvaluator.Factory toEvaluator(EvaluatorMapper.ToEvaluator toEvaluator) { - return new DelayEvaluator.Factory(source(), msValue(toEvaluator.foldCtx())); + return context -> new DelayEvaluator(context, msValue(toEvaluator.foldCtx())); } - @Evaluator - static boolean process(@Fixed long ms) { - // Only activate in snapshot builds - if (Build.current().isSnapshot()) { + static final class DelayEvaluator implements ExpressionEvaluator { + private final DriverContext driverContext; + private final long ms; + + DelayEvaluator(DriverContext driverContext, long ms) { + if (Build.current().isSnapshot() == false) { + throw new IllegalArgumentException("Delay function is only available in snapshot builds"); + } + this.driverContext = driverContext; + this.ms = ms; + } + + @Override + public Block eval(Page page) { + int positionCount = page.getPositionCount(); + for (int p = 0; p < positionCount; p++) { + delay(ms); + } + return driverContext.blockFactory().newConstantBooleanBlockWith(true, positionCount); + } + + private void delay(long ms) { try { + driverContext.checkForEarlyTermination(); Thread.sleep(ms); } catch (InterruptedException e) { - return true; + Thread.currentThread().interrupt(); } - } else { - throw new IllegalArgumentException("Delay function is only available in snapshot builds"); } - return true; + + @Override + public void close() { + + } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index e4310a0bbf3a6..89b4231a999d4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -24,132 +24,16 @@ * examples below are *just* used for that. Don't make more of those - add them * to {@link EsqlCapabilities} instead. *
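For context on the migration this Javadoc describes: each removed node feature now lives as a constant in EsqlCapabilities.Cap, and what clients see is just the lower-cased constant name. A sketch, assuming Cap#capabilityName() and the static CAPABILITIES set keep the shapes they have in this change:

// Hypothetical check; Cap#capabilityName() is assumed to lower-case the enum name.
String name = EsqlCapabilities.Cap.DATE_NANOS_DATE_FORMAT.capabilityName(); // "date_nanos_date_format"
boolean supported = EsqlCapabilities.CAPABILITIES.contains(name);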

- * NOTE: You can't remove a feature now and probably never will be able to. + * NOTE: You can only remove features on major version boundaries. * Only add more of these if you need a fast CPU level check. *

*/ public class EsqlFeatures implements FeatureSpecification { - /** - * Introduction of {@code MV_SORT}, {@code MV_SLICE}, and {@code MV_ZIP}. - * Added in #106095. - */ - private static final NodeFeature MV_SORT = new NodeFeature("esql.mv_sort", true); - - /** - * When we disabled some broken optimizations around {@code nullable}. - * Fixed in #105691. - */ - private static final NodeFeature DISABLE_NULLABLE_OPTS = new NodeFeature("esql.disable_nullable_opts", true); - - /** - * Introduction of {@code ST_X} and {@code ST_Y}. Added in #105768. - */ - private static final NodeFeature ST_X_Y = new NodeFeature("esql.st_x_y", true); - - /** - * Changed precision of {@code geo_point} and {@code cartesian_point} fields, by loading from source into WKB. Done in #103691. - */ - private static final NodeFeature SPATIAL_POINTS_FROM_SOURCE = new NodeFeature("esql.spatial_points_from_source", true); - - /** - * Support for loading {@code geo_shape} and {@code cartesian_shape} fields. Done in #104269. - */ - private static final NodeFeature SPATIAL_SHAPES = new NodeFeature("esql.spatial_shapes", true); - - /** - * Support for spatial aggregation {@code ST_CENTROID}. Done in #104269. - */ - private static final NodeFeature ST_CENTROID_AGG = new NodeFeature("esql.st_centroid_agg", true); - - /** - * Support for spatial aggregation {@code ST_INTERSECTS}. Done in #104907. - */ - private static final NodeFeature ST_INTERSECTS = new NodeFeature("esql.st_intersects", true); - - /** - * Support for spatial aggregation {@code ST_CONTAINS} and {@code ST_WITHIN}. Done in #106503. - */ - private static final NodeFeature ST_CONTAINS_WITHIN = new NodeFeature("esql.st_contains_within", true); - - /** - * Support for spatial aggregation {@code ST_DISJOINT}. Done in #107007. - */ - private static final NodeFeature ST_DISJOINT = new NodeFeature("esql.st_disjoint", true); - - /** - * The introduction of the {@code VALUES} agg. - */ - private static final NodeFeature AGG_VALUES = new NodeFeature("esql.agg_values", true); - - /** - * Does ESQL support async queries. - */ - public static final NodeFeature ASYNC_QUERY = new NodeFeature("esql.async_query", true); - - /** - * Does ESQL support FROM OPTIONS? - */ - @Deprecated - public static final NodeFeature FROM_OPTIONS = new NodeFeature("esql.from_options", true); - - /** - * Cast string literals to a desired data type. - */ - public static final NodeFeature STRING_LITERAL_AUTO_CASTING = new NodeFeature("esql.string_literal_auto_casting", true); - - /** - * Base64 encoding and decoding functions. - */ - public static final NodeFeature BASE64_DECODE_ENCODE = new NodeFeature("esql.base64_decode_encode", true); - - /** - * Support for the :: casting operator - */ - public static final NodeFeature CASTING_OPERATOR = new NodeFeature("esql.casting_operator", true); - - /** - * Blocks can be labelled with {@link org.elasticsearch.compute.data.Block.MvOrdering#SORTED_ASCENDING} for optimizations. - */ - public static final NodeFeature MV_ORDERING_SORTED_ASCENDING = new NodeFeature("esql.mv_ordering_sorted_ascending", true); - - /** - * Support for metrics counter fields - */ - public static final NodeFeature METRICS_COUNTER_FIELDS = new NodeFeature("esql.metrics_counter_fields", true); - - /** - * Cast string literals to a desired data type for IN predicate and more types for BinaryComparison. 
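
For context, the "fast CPU level check" that justifies keeping a NodeFeature is a plain boolean probe of cluster state. A hypothetical gating call (the FeatureService and ClusterState wiring is assumed, not shown in this diff):

    // Take the new code path only once every node in the cluster advertises the feature.
    if (featureService.clusterHasFeature(clusterState, EsqlFeatures.METRICS_SYNTAX)) {
        // feature-gated logic
    }
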
- */ - public static final NodeFeature STRING_LITERAL_AUTO_CASTING_EXTENDED = new NodeFeature( - "esql.string_literal_auto_casting_extended", - true - ); - - /** - * Support for metadata fields. - */ - public static final NodeFeature METADATA_FIELDS = new NodeFeature("esql.metadata_fields", true); - - /** - * Support for timespan units abbreviations - */ - public static final NodeFeature TIMESPAN_ABBREVIATIONS = new NodeFeature("esql.timespan_abbreviations", true); - - /** - * Support metrics counter types - */ - public static final NodeFeature COUNTER_TYPES = new NodeFeature("esql.counter_types", true); - /** * Support metrics syntax */ public static final NodeFeature METRICS_SYNTAX = new NodeFeature("esql.metrics_syntax"); - /** - * Internal resolve_fields API for ES|QL - */ - public static final NodeFeature RESOLVE_FIELDS_API = new NodeFeature("esql.resolve_fields_api", true); - private Set<NodeFeature> snapshotBuildFeatures() { assert Build.current().isSnapshot() : Build.current(); return Set.of(METRICS_SYNTAX); @@ -157,30 +41,7 @@ private Set<NodeFeature> snapshotBuildFeatures() { @Override public Set<NodeFeature> getFeatures() { - Set<NodeFeature> features = Set.of( - ASYNC_QUERY, - AGG_VALUES, - BASE64_DECODE_ENCODE, - MV_SORT, - DISABLE_NULLABLE_OPTS, - ST_X_Y, - FROM_OPTIONS, - SPATIAL_POINTS_FROM_SOURCE, - SPATIAL_SHAPES, - ST_CENTROID_AGG, - ST_INTERSECTS, - ST_CONTAINS_WITHIN, - ST_DISJOINT, - STRING_LITERAL_AUTO_CASTING, - CASTING_OPERATOR, - MV_ORDERING_SORTED_ASCENDING, - METRICS_COUNTER_FIELDS, - STRING_LITERAL_AUTO_CASTING_EXTENDED, - METADATA_FIELDS, - TIMESPAN_ABBREVIATIONS, - COUNTER_TYPES, - RESOLVE_FIELDS_API - ); + Set<NodeFeature> features = Set.of(); if (Build.current().isSnapshot()) { return Collections.unmodifiableSet(Sets.union(features, snapshotBuildFeatures())); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index 95ee6ab337bd6..eef0df6b89dd3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -539,6 +539,10 @@ public static String dateTimeToString(long dateTime, DateFormatter formatter) { return formatter == null ? dateTimeToString(dateTime) : formatter.formatMillis(dateTime); } + public static String nanoTimeToString(long dateTime, DateFormatter formatter) { + return formatter == null ? nanoTimeToString(dateTime) : formatter.formatNanos(dateTime); + } + public static BytesRef numericBooleanToString(Object field) { return new BytesRef(String.valueOf(field)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index ee5073c05cab1..ed1ee71ff1968 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -241,7 +241,10 @@ public final void test() throws Throwable { * The csv tests support all but a few features. The unsupported features * are tested in integration tests. 
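
A worked example of the converter pair this relies on (the formatter pattern is taken from this PR's tests; the rendered strings are illustrative): the same long is read as epoch milliseconds by one method and as epoch nanoseconds by the other.

    DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time");
    // 1,700,000,000,000 ms and 1,700,000,000,000,000,000 ns denote the same instant
    String fromMillis = EsqlDataTypeConverter.dateTimeToString(1_700_000_000_000L, formatter);
    String fromNanos = EsqlDataTypeConverter.nanoTimeToString(1_700_000_000_000_000_000L, formatter);
    // both should render 2023-11-14T22:13:20Z, at millisecond vs nanosecond resolution
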
*/ - assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS))); + assumeFalse( + "metadata fields aren't supported", + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.METADATA_FIELDS.capabilityName()) + ); assumeFalse( "enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.ENRICH_LOAD.capabilityName()) @@ -265,7 +268,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V10.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V11.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java index 134981d3c3b0c..ebfe1c8147073 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java @@ -63,7 +63,12 @@ private DriverStatus.OperatorStatus randomOperatorStatus() { String name = randomAlphaOfLength(4); Operator.Status status = randomBoolean() ? null - : new AbstractPageMappingOperator.Status(randomNonNegativeLong(), between(0, Integer.MAX_VALUE)); + : new AbstractPageMappingOperator.Status( + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); return new DriverStatus.OperatorStatus(name, status); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 4b9ae0d1692e5..1d49409dc964d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -683,7 +683,7 @@ public void testProfileXContent() { 20021, 20000, 12, - List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))), + List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10, 111, 222))), DriverSleeps.empty() ) ) @@ -722,7 +722,9 @@ public void testProfileXContent() { "operator" : "asdf", "status" : { "process_nanos" : 10021, - "pages_processed" : 10 + "pages_processed" : 10, + "rows_received" : 111, + "rows_emitted" : 222 } } ], diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/NamedWriteablesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/NamedWriteablesTests.java index f8a036a262701..1fba8f66d0f17 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/NamedWriteablesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/NamedWriteablesTests.java @@ -20,10 +20,21 @@ public class NamedWriteablesTests extends ESTestCase { public void testTopNStatus() throws Exception { try (EsqlPlugin plugin = new EsqlPlugin()) { NamedWriteableRegistry registry = new NamedWriteableRegistry(plugin.getNamedWriteables()); - TopNOperatorStatus origin = new TopNOperatorStatus(randomNonNegativeInt(), 
randomNonNegativeLong()); + TopNOperatorStatus origin = new TopNOperatorStatus( + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeInt(), + randomNonNegativeInt(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); TopNOperatorStatus copy = (TopNOperatorStatus) copyNamedWriteable(origin, registry, Operator.Status.class); assertThat(copy.occupiedRows(), equalTo(origin.occupiedRows())); assertThat(copy.ramBytesUsed(), equalTo(origin.ramBytesUsed())); + assertThat(copy.pagesReceived(), equalTo(origin.pagesReceived())); + assertThat(copy.pagesEmitted(), equalTo(origin.pagesEmitted())); + assertThat(copy.rowsReceived(), equalTo(origin.rowsReceived())); + assertThat(copy.rowsEmitted(), equalTo(origin.rowsEmitted())); } } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 2df6e30e96081..84af46a8cbbf0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1198,21 +1198,21 @@ public void testDateFormatOnInt() { verifyUnsupported(""" from test | eval date_format(int) - """, "first argument of [date_format(int)] must be [datetime], found value [int] type [integer]"); + """, "first argument of [date_format(int)] must be [datetime or date_nanos], found value [int] type [integer]"); } public void testDateFormatOnFloat() { verifyUnsupported(""" from test | eval date_format(float) - """, "first argument of [date_format(float)] must be [datetime], found value [float] type [double]"); + """, "first argument of [date_format(float)] must be [datetime or date_nanos], found value [float] type [double]"); } public void testDateFormatOnText() { verifyUnsupported(""" from test | eval date_format(keyword) - """, "first argument of [date_format(keyword)] must be [datetime], found value [keyword] type [keyword]"); + """, "first argument of [date_format(keyword)] must be [datetime or date_nanos], found value [keyword] type [keyword]"); } public void testDateFormatWithNumericFormat() { @@ -2140,7 +2140,7 @@ public void testLookupMatchTypeWrong() { } public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2169,7 +2169,7 @@ public void testLookupJoinUnknownIndex() { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; @@ -2192,7 +2192,7 @@ public void testLookupJoinUnknownField() { } public void testMultipleLookupJoinsGiveDifferentAttributes() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); // The field attributes that get contributed by different LOOKUP JOIN commands must have different name ids, // even if 
they have the same names. Otherwise, things like dependency analysis - like in PruneColumns - cannot work based on @@ -2222,7 +2222,7 @@ public void testMultipleLookupJoinsGiveDifferentAttributes() { } public void testLookupJoinIndexMode() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); var indexResolution = AnalyzerTestUtils.expandedDefaultIndexResolution(); var lookupResolution = AnalyzerTestUtils.defaultLookupResolution(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 180e32fb7c15d..2ee6cf6136114 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -113,7 +113,7 @@ public void testTooBigQuery() { } public void testJoinOnConstant() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertEquals( "1:55: JOIN ON clause only supports fields at the moment, found [123]", error("row languages = 1, gender = \"f\" | lookup join test on 123") @@ -129,7 +129,7 @@ public void testJoinOnConstant() { } public void testJoinOnMultipleFields() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertEquals( "1:35: JOIN ON clause only supports one field at the moment, found [2]", error("row languages = 1, gender = \"f\" | lookup join test on gender, languages") @@ -137,7 +137,7 @@ public void testJoinOnMultipleFields() { } public void testJoinTwiceOnTheSameField() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertEquals( "1:35: JOIN ON clause only supports one field at the moment, found [2]", error("row languages = 1, gender = \"f\" | lookup join test on languages, languages") @@ -145,7 +145,7 @@ public void testJoinTwiceOnTheSameField() { } public void testJoinTwiceOnTheSameField_TwoLookups() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertEquals( "1:80: JOIN ON clause only supports one field at the moment, found [2]", error("row languages = 1, gender = \"f\" | lookup join test on languages | eval x = 1 | lookup join test on gender, gender") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index d78c4bfa21ced..f932992e81557 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -1984,7 +1984,7 @@ public void testSortByAggregate() { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN 
capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperatorStatusTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperatorStatusTests.java new file mode 100644 index 0000000000000..a204e93b0d16a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperatorStatusTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class LookupFromIndexOperatorStatusTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return LookupFromIndexOperator.Status::new; + } + + @Override + protected LookupFromIndexOperator.Status createTestInstance() { + return new LookupFromIndexOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomLongBetween(0, TimeValue.timeValueHours(1).millis()), + randomNonNegativeLong(), + randomNonNegativeLong() + ); + } + + @Override + protected LookupFromIndexOperator.Status mutateInstance(LookupFromIndexOperator.Status in) throws IOException { + long receivedPages = in.receivedPages(); + long completedPages = in.completedPages(); + long totalTimeInMillis = in.totalTimeInMillis(); + long totalTerms = in.totalTerms(); + long emittedPages = in.emittedPages(); + switch (randomIntBetween(0, 4)) { + case 0 -> receivedPages = randomValueOtherThan(receivedPages, ESTestCase::randomNonNegativeLong); + case 1 -> completedPages = randomValueOtherThan(completedPages, ESTestCase::randomNonNegativeLong); + case 2 -> totalTimeInMillis = randomValueOtherThan(totalTimeInMillis, ESTestCase::randomNonNegativeLong); + case 3 -> totalTerms = randomValueOtherThan(totalTerms, ESTestCase::randomNonNegativeLong); + case 4 -> emittedPages = randomValueOtherThan(emittedPages, ESTestCase::randomNonNegativeLong); + default -> throw new UnsupportedOperationException(); + } + return new LookupFromIndexOperator.Status(receivedPages, completedPages, totalTimeInMillis, totalTerms, emittedPages); + } + + public void testToXContent() { + var status = new LookupFromIndexOperator.Status(100, 50, TimeValue.timeValueSeconds(10).millis(), 120, 88); + String json = Strings.toString(status, true, true); + assertThat(json, equalTo(""" + { + "received_pages" : 100, + "completed_pages" : 50, + "total_time_in_millis" : 10000, + "total_time" : "10s", + "emitted_pages" : 88, + "total_terms" : 120 + }""")); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexServiceResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexServiceResponseTests.java new 
file mode 100644 index 0000000000000..098ea9eaa0c2d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexServiceResponseTests.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.esql.TestBlockFactory; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class LookupFromIndexServiceResponseTests extends AbstractWireSerializingTestCase { + private final List breakers = new ArrayList<>(); + + LookupFromIndexService.LookupResponse createTestInstance(BlockFactory blockFactory) { + return new LookupFromIndexService.LookupResponse(randomList(0, 10, () -> testPage(blockFactory)), blockFactory); + } + + /** + * Build a {@link Page} to test serialization. If we had nice random + * {@linkplain Page} generation we'd use that happily, but it's off + * in the tests for compute, and we're in ESQL. And we don't + * really need a fully random one to verify serialization + * here. + */ + Page testPage(BlockFactory blockFactory) { + try (IntVector.Builder builder = blockFactory.newIntVectorFixedBuilder(3)) { + builder.appendInt(1); + builder.appendInt(2); + builder.appendInt(3); + return new Page(builder.build().asBlock()); + } + } + + @Override + protected LookupFromIndexService.LookupResponse createTestInstance() { + // Can't use a real block factory for the basic serialization tests because they don't release. 
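
That comment is the crux of the test design: the generic serialization round-trips copy pages they never release, so they must run against a block factory with no circuit breaker behind it, while the breaker-backed tests further down release everything and then assert the breaker drains to zero. In short (helper names from this test):

    BlockFactory lenient = TestBlockFactory.getNonBreakingInstance(); // copy/equals round-trips
    BlockFactory strict = blockFactory(); // MockBigArrays-backed, checked in allBreakersEmpty()
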
+ return createTestInstance(TestBlockFactory.getNonBreakingInstance()); + } + + @Override + protected Writeable.Reader instanceReader() { + return in -> new LookupFromIndexService.LookupResponse(in, TestBlockFactory.getNonBreakingInstance()); + } + + @Override + protected LookupFromIndexService.LookupResponse mutateInstance(LookupFromIndexService.LookupResponse instance) throws IOException { + assertThat(instance.blockFactory, sameInstance(TestBlockFactory.getNonBreakingInstance())); + List pages = new ArrayList<>(instance.pages().size()); + pages.addAll(instance.pages()); + pages.add(testPage(TestBlockFactory.getNonBreakingInstance())); + return new LookupFromIndexService.LookupResponse(pages, instance.blockFactory); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(List.of(IntBlock.ENTRY)); + } + + public void testWithBreaker() throws IOException { + BlockFactory origFactory = blockFactory(); + BlockFactory copyFactory = blockFactory(); + LookupFromIndexService.LookupResponse orig = createTestInstance(origFactory); + try { + LookupFromIndexService.LookupResponse copy = copyInstance( + orig, + getNamedWriteableRegistry(), + (out, v) -> v.writeTo(out), + in -> new LookupFromIndexService.LookupResponse(in, copyFactory), + TransportVersion.current() + ); + try { + assertThat(copy, equalTo(orig)); + } finally { + copy.decRef(); + } + assertThat(copyFactory.breaker().getUsed(), equalTo(0L)); + } finally { + orig.decRef(); + } + assertThat(origFactory.breaker().getUsed(), equalTo(0L)); + } + + /** + * Tests that we don't reserve any memory other than that in the {@link Page}s we + * hold, and calling {@link LookupFromIndexService.LookupResponse#takePages} + * gives us those pages. If we then close those pages, we should have 0 + * reserved memory. 
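
Put differently, takePages() is a one-shot ownership transfer. Condensed, the contract the test exercises is (variable names assumed):

    List<Page> pages = response.takePages(); // the response gives up ownership
    pages.forEach(Page::releaseBlocks);      // the caller returns the memory
    assert response.takePages() == null;     // a second take yields nothing
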
+ */ + public void testTakePages() { + BlockFactory factory = blockFactory(); + LookupFromIndexService.LookupResponse orig = createTestInstance(factory); + try { + if (orig.pages().isEmpty()) { + assertThat(factory.breaker().getUsed(), equalTo(0L)); + return; + } + List pages = orig.takePages(); + Releasables.closeExpectNoException(Releasables.wrap(Iterators.map(pages.iterator(), page -> page::releaseBlocks))); + assertThat(factory.breaker().getUsed(), equalTo(0L)); + assertThat(orig.takePages(), nullValue()); + } finally { + orig.decRef(); + } + assertThat(factory.breaker().getUsed(), equalTo(0L)); + } + + private BlockFactory blockFactory() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(4 /* more than we need*/)) + .withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + return new BlockFactory(breaker, bigArrays); + } + + @After + public void allBreakersEmpty() throws Exception { + // first check that all big arrays are released, which can affect breakers + MockBigArrays.ensureAllArraysAreReleased(); + + for (CircuitBreaker breaker : breakers) { + assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); + } + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatErrorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatErrorTests.java index a5e6514b3e02c..c7eebdd82be53 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatErrorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatErrorTests.java @@ -37,7 +37,7 @@ protected Matcher expectedTypeErrorMatcher(List> validPerP String source = sourceForSignature(signature); String name = signature.get(0).typeName(); if (signature.size() == 1) { - return equalTo("first argument of [" + source + "] must be [datetime], found value [] type [" + name + "]"); + return equalTo("first argument of [" + source + "] must be [datetime or date_nanos], found value [] type [" + name + "]"); } // Two argument version // Handle the weird case where we're calling the two argument version with the date first instead of the format. 
@@ -46,7 +46,7 @@ protected Matcher expectedTypeErrorMatcher(List> validPerP } return equalTo(typeErrorMessage(true, validPerPosition, signature, (v, p) -> switch (p) { case 0 -> "string"; - case 1 -> "datetime"; + case 1 -> "datetime or date_nanos"; default -> ""; })); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java index 3dd1f3e629da4..1167b91a81e35 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -44,7 +45,24 @@ public static Iterable parameters() { DataType.KEYWORD, TestCaseSupplier.dateFormatCases(), TestCaseSupplier.dateCases(Instant.parse("1900-01-01T00:00:00.00Z"), Instant.parse("9999-12-31T00:00:00.00Z")), - matchesPattern("DateFormatEvaluator\\[val=Attribute\\[channel=1], formatter=Attribute\\[(channel=0|\\w+)], locale=en_US]"), + matchesPattern( + "DateFormatMillisEvaluator\\[val=Attribute\\[channel=1], formatter=Attribute\\[(channel=0|\\w+)], locale=en_US]" + ), + (lhs, rhs) -> List.of(), + false + ) + ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + (format, value) -> new BytesRef( + DateFormatter.forPattern(((BytesRef) format).utf8ToString()).formatNanos(DateUtils.toLong((Instant) value)) + ), + DataType.KEYWORD, + TestCaseSupplier.dateFormatCases(), + TestCaseSupplier.dateNanosCases(), + matchesPattern( + "DateFormatNanosEvaluator\\[val=Attribute\\[channel=1], formatter=Attribute\\[(channel=0|\\w+)], locale=en_US]" + ), (lhs, rhs) -> List.of(), false ) @@ -52,12 +70,20 @@ public static Iterable parameters() { // Default formatter cases TestCaseSupplier.unary( suppliers, - "DateFormatConstantEvaluator[val=Attribute[channel=0], formatter=format[strict_date_optional_time] locale[]]", + "DateFormatMillisConstantEvaluator[val=Attribute[channel=0], formatter=format[strict_date_optional_time] locale[]]", TestCaseSupplier.dateCases(Instant.parse("1900-01-01T00:00:00.00Z"), Instant.parse("9999-12-31T00:00:00.00Z")), DataType.KEYWORD, (value) -> new BytesRef(EsqlDataTypeConverter.DEFAULT_DATE_TIME_FORMATTER.formatMillis(((Instant) value).toEpochMilli())), List.of() ); + TestCaseSupplier.unary( + suppliers, + "DateFormatNanosConstantEvaluator[val=Attribute[channel=0], formatter=format[strict_date_optional_time] locale[]]", + TestCaseSupplier.dateNanosCases(), + DataType.KEYWORD, + (value) -> new BytesRef(EsqlDataTypeConverter.DEFAULT_DATE_TIME_FORMATTER.formatNanos(DateUtils.toLong((Instant) value))), + List.of() + ); return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(true, suppliers); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 2aed259e7ad0b..a8f8054fbc6b1 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -4928,7 +4928,7 @@ public void testPlanSanityCheck() throws Exception { } public void testPlanSanityCheckWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); var plan = optimizedPlan(""" FROM test @@ -6003,7 +6003,7 @@ public void testLookupStats() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String query = """ FROM test @@ -6045,7 +6045,7 @@ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnLeftSideField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String query = """ FROM test @@ -6088,7 +6088,7 @@ public void testLookupJoinPushDownFilterOnLeftSideField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownDisabledForLookupField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String query = """ FROM test @@ -6132,7 +6132,7 @@ public void testLookupJoinPushDownDisabledForLookupField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String query = """ FROM test @@ -6183,7 +6183,7 @@ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightFiel * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownDisabledForDisjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); String query = """ FROM test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 66891210a1e47..75825f4e8f480 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -2618,7 +2618,7 @@ public void testVerifierOnMissingReferences() { } public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); // Do not assert serialization: // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. @@ -7301,7 +7301,7 @@ public void testLookupThenTopN() { } public void testLookupJoinFieldLoading() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); TestDataSource data = dataSetWithLookupIndices(Map.of("lookup_index", List.of("first_name", "foo", "bar", "baz"))); @@ -7378,7 +7378,7 @@ public void testLookupJoinFieldLoading() throws Exception { } public void testLookupJoinFieldLoadingTwoLookups() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); TestDataSource data = dataSetWithLookupIndices( Map.of( @@ -7432,7 +7432,7 @@ public void testLookupJoinFieldLoadingTwoLookups() throws Exception { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/119082") public void testLookupJoinFieldLoadingTwoLookupsProjectInBetween() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); TestDataSource data = dataSetWithLookupIndices( Map.of( @@ -7473,7 +7473,7 @@ public void testLookupJoinFieldLoadingTwoLookupsProjectInBetween() throws Except @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/118778") public void testLookupJoinFieldLoadingDropAllFields() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); TestDataSource data = dataSetWithLookupIndices(Map.of("lookup_index", List.of("first_name", "foo", "bar", "baz"))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index 4db4f7925d4ff..b1c9030db7a43 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -1365,7 +1365,7 @@ public void testMetrics() { } public void testLookupJoin() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( "FROM employees | KEEP languages | RENAME languages AS language_code | LOOKUP JOIN languages_lookup ON language_code", Set.of("languages", "languages.*", "language_code", "language_code.*"), @@ -1374,7 +1374,7 @@ public void testLookupJoin() { } public void testLookupJoinKeep() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM employees @@ -1388,7 +1388,7 @@ public void testLookupJoinKeep() { } public void testLookupJoinKeepWildcard() { - assumeTrue("LOOKUP JOIN available as snapshot only", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM employees @@ -1402,7 +1402,7 @@ public void testLookupJoinKeepWildcard() { } public void testMultiLookupJoin() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1415,7 +1415,7 @@ public void testMultiLookupJoin() { } public void testMultiLookupJoinKeepBefore() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1429,7 +1429,7 @@ public void testMultiLookupJoinKeepBefore() { } public void testMultiLookupJoinKeepBetween() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1454,7 +1454,7 @@ public void testMultiLookupJoinKeepBetween() { } public void testMultiLookupJoinKeepAfter() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1481,7 +1481,7 @@ public void testMultiLookupJoinKeepAfter() { } public void testMultiLookupJoinKeepAfterWildcard() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1495,7 +1495,7 @@ public void testMultiLookupJoinKeepAfterWildcard() { } public void testMultiLookupJoinSameIndex() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1509,7 +1509,7 @@ public void testMultiLookupJoinSameIndex() { } public void testMultiLookupJoinSameIndexKeepBefore() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1524,7 +1524,7 @@ public void testMultiLookupJoinSameIndexKeepBefore() { } public void testMultiLookupJoinSameIndexKeepBetween() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1550,7 +1550,7 @@ public void testMultiLookupJoinSameIndexKeepBetween() { } public void testMultiLookupJoinSameIndexKeepAfter() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V11.isEnabled()); assertFieldNames( """ FROM sample_data diff --git 
a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 50f83dc7b02eb..d2be50cb5e841 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -242,12 +242,7 @@ public void testGetServicesWithRerankTaskType() throws IOException { @SuppressWarnings("unchecked") public void testGetServicesWithCompletionTaskType() throws IOException { List services = getServices(TaskType.COMPLETION); - if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() - || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { - assertThat(services.size(), equalTo(10)); - } else { - assertThat(services.size(), equalTo(9)); - } + assertThat(services.size(), equalTo(9)); String[] providers = new String[services.size()]; for (int i = 0; i < services.size(); i++) { @@ -269,9 +264,30 @@ public void testGetServicesWithCompletionTaskType() throws IOException { ) ); + assertArrayEquals(providers, providerList.toArray()); + } + + @SuppressWarnings("unchecked") + public void testGetServicesWithChatCompletionTaskType() throws IOException { + List services = getServices(TaskType.CHAT_COMPLETION); if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { - providerList.add(6, "elastic"); + assertThat(services.size(), equalTo(2)); + } else { + assertThat(services.size(), equalTo(1)); + } + + String[] providers = new String[services.size()]; + for (int i = 0; i < services.size(); i++) { + Map serviceConfig = (Map) services.get(i); + providers[i] = (String) serviceConfig.get("service"); + } + + var providerList = new ArrayList<>(List.of("openai")); + + if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() + || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { + providerList.addFirst("elastic"); } assertArrayEquals(providers, providerList.toArray()); diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java index 3f7a51cc4e9f9..9da6b52555498 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; import 
org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; @@ -74,7 +74,7 @@ public void setup() throws Exception { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(Utils.TestInferencePlugin.class, LocalStateCompositeXPackPlugin.class); + return Arrays.asList(LocalStateInferencePlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index 24585318b15b3..fd0480b141981 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -31,8 +31,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalModel; @@ -77,7 +76,7 @@ public void createComponents() { @Override protected Collection<Class<? extends Plugin>> getPlugins() { - return pluginList(ReindexPlugin.class, InferencePlugin.class, LocalStateCompositeXPackPlugin.class); + return pluginList(ReindexPlugin.class, LocalStateInferencePlugin.class); } public void testStoreModel() throws Exception { @@ -435,7 +434,10 @@ public void testGetAllModels_withDoNotPersist() throws Exception { assertNull(exceptionHolder.get()); assertThat(modelHolder.get(), hasSize(2)); - expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareGetIndex().addIndices(".inference").get()); + expectThrows( + IndexNotFoundException.class, + () -> client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".inference").get() + ); // this time check the index is created blockingCall(listener -> modelRegistry.getAllModels(true, listener), modelHolder, exceptionHolder); @@ -553,7 +555,7 @@ public void testGetByTaskType_WithDefaults() throws Exception { } private void assertInferenceIndexExists() { - var indexResponse = client().admin().indices().prepareGetIndex().addIndices(".inference").get(); + var indexResponse = client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".inference").get(); assertNotNull(indexResponse.getSettings()); assertNotNull(indexResponse.getMappings()); } diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 53974657e4e23..1c2240e8c5217 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -34,6 +34,7 @@ requires software.amazon.awssdk.retries.api; requires org.reactivestreams; requires org.elasticsearch.logging; + requires org.elasticsearch.sslconfig; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 580e5dd4bf8a1..7d6069572ba21 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -11,8 +11,6 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsMapper; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; -import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; -import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import java.util.Set; @@ -26,18 +24,6 @@ */ public class InferenceFeatures implements FeatureSpecification { - @Override - public Set<NodeFeature> getFeatures() { - return Set.of( - TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED, - RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED, - SemanticTextFieldMapper.SEMANTIC_TEXT_SEARCH_INFERENCE_ID, - SemanticQueryBuilder.SEMANTIC_TEXT_INNER_HITS, - SemanticTextFieldMapper.SEMANTIC_TEXT_DEFAULT_ELSER_2, - TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED - ); - } - private static final NodeFeature SEMANTIC_TEXT_HIGHLIGHTER = new NodeFeature("semantic_text.highlighter"); @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index d303ead4d9188..6ba529cb66eaa 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -31,6 +31,7 @@ import org.elasticsearch.inference.InferenceServiceRegistry; import org.elasticsearch.license.License; import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.node.PluginComponentBinding; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; @@ -49,6 +50,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.inference.action.DeleteInferenceEndpointAction; import org.elasticsearch.xpack.core.inference.action.GetInferenceDiagnosticsAction; @@ -58,6 +60,7 @@ import org.elasticsearch.xpack.core.inference.action.PutInferenceModelAction; import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; import org.elasticsearch.xpack.core.inference.action.UpdateInferenceModelAction; +import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.inference.action.TransportDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceDiagnosticsAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceModelAction; @@ -126,7 +129,6 @@ import java.util.Map; import java.util.function.Predicate; import java.util.function.Supplier; -import java.util.stream.Collectors; import java.util.stream.Stream; import static 
java.util.Collections.singletonList; @@ -166,6 +168,7 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP private final Settings settings; private final SetOnce<HttpRequestSender.Factory> httpFactory = new SetOnce<>(); private final SetOnce<AmazonBedrockRequestSender.Factory> amazonBedrockFactory = new SetOnce<>(); + private final SetOnce<HttpRequestSender.Factory> elasticInferenceServiceFactory = new SetOnce<>(); private final SetOnce<ServiceComponents> serviceComponents = new SetOnce<>(); // This is mainly so that the rest handlers can access the ThreadPool in a way that avoids potential null pointers from it // not being initialized yet @@ -252,31 +255,31 @@ public Collection<?> createComponents(PluginServices services) { var inferenceServices = new ArrayList<>(inferenceServiceExtensions); inferenceServices.add(this::getInferenceServiceFactories); - // Set elasticInferenceUrl based on feature flags to support transitioning to the new Elastic Inference Service URL without exposing - // internal names like "eis" or "gateway". - ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); - - String elasticInferenceUrl = null; + if (isElasticInferenceServiceEnabled()) { + // Create a separate instance of HTTPClientManager with its own SSL configuration (`xpack.inference.elastic.http.ssl.*`). + var elasticInferenceServiceHttpClientManager = HttpClientManager.create( + settings, + services.threadPool(), + services.clusterService(), + throttlerManager, + getSslService() + ); - if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - elasticInferenceUrl = inferenceServiceSettings.getElasticInferenceServiceUrl(); - } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - log.warn( - "Deprecated flag {} detected for enabling {}. Please use {}.", - ELASTIC_INFERENCE_SERVICE_IDENTIFIER, - DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, - ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG + var elasticInferenceServiceRequestSenderFactory = new HttpRequestSender.Factory( + serviceComponents.get(), + elasticInferenceServiceHttpClientManager, + services.clusterService() ); - elasticInferenceUrl = inferenceServiceSettings.getEisGatewayUrl(); - } + elasticInferenceServiceFactory.set(elasticInferenceServiceRequestSenderFactory); - if (elasticInferenceUrl != null) { + ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); + String elasticInferenceUrl = this.getElasticInferenceServiceUrl(inferenceServiceSettings); elasticInferenceServiceComponents.set(new ElasticInferenceServiceComponents(elasticInferenceUrl)); inferenceServices.add( () -> List.of( context -> new ElasticInferenceService( - httpFactory.get(), + elasticInferenceServiceFactory.get(), serviceComponents.get(), elasticInferenceServiceComponents.get() ) @@ -400,16 +403,21 @@ public static ExecutorBuilder<?> inferenceUtilityExecutor(Settings settings) { @Override public List<Setting<?>> getSettings() { - return Stream.of( - HttpSettings.getSettingsDefinitions(), - HttpClientManager.getSettingsDefinitions(), - ThrottlerManager.getSettingsDefinitions(), - RetrySettings.getSettingsDefinitions(), - ElasticInferenceServiceSettings.getSettingsDefinitions(), - Truncator.getSettingsDefinitions(), - RequestExecutorServiceSettings.getSettingsDefinitions(), - List.of(SKIP_VALIDATE_AND_START) - ).flatMap(Collection::stream).collect(Collectors.toList()); + ArrayList<Setting<?>> settings = new ArrayList<>(); + settings.addAll(HttpSettings.getSettingsDefinitions()); + settings.addAll(HttpClientManager.getSettingsDefinitions()); + 
settings.addAll(ThrottlerManager.getSettingsDefinitions()); + settings.addAll(RetrySettings.getSettingsDefinitions()); + settings.addAll(Truncator.getSettingsDefinitions()); + settings.addAll(RequestExecutorServiceSettings.getSettingsDefinitions()); + settings.add(SKIP_VALIDATE_AND_START); + + // Register Elastic Inference Service settings definitions if the corresponding feature flag is enabled. + if (isElasticInferenceServiceEnabled()) { + settings.addAll(ElasticInferenceServiceSettings.getSettingsDefinitions()); + } + + return settings; } @Override @@ -466,7 +474,10 @@ public List<QueryRewriteInterceptor> getQueryRewriteInterceptors() { @Override public List<RetrieverSpec<?>> getRetrievers() { return List.of( - new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent), + new RetrieverSpec<>( + new ParseField(TextSimilarityRankBuilder.NAME), + (parser, context) -> TextSimilarityRankRetrieverBuilder.fromXContent(parser, context, getLicenseState()) + ), new RetrieverSpec<>(new ParseField(RandomRankBuilder.NAME), RandomRankRetrieverBuilder::fromXContent) ); } @@ -475,4 +486,36 @@ public List<RetrieverSpec<?>> getRetrievers() { public Map<String, Highlighter> getHighlighters() { return Map.of(SemanticTextHighlighter.NAME, new SemanticTextHighlighter()); } + + // Get Elastic Inference service URL based on feature flags to support transitioning + // to the new Elastic Inference Service URL. + private String getElasticInferenceServiceUrl(ElasticInferenceServiceSettings settings) { + String elasticInferenceUrl = null; + + if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + elasticInferenceUrl = settings.getElasticInferenceServiceUrl(); + } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + log.warn( + "Deprecated flag {} detected for enabling {}. 
Please use {}.", + ELASTIC_INFERENCE_SERVICE_IDENTIFIER, + DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, + ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG + ); + elasticInferenceUrl = settings.getEisGatewayUrl(); + } + + return elasticInferenceUrl; + } + + protected Boolean isElasticInferenceServiceEnabled() { + return (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() || DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()); + } + + protected SSLService getSslService() { + return XPackPlugin.getSharedSslService(); + } + + protected XPackLicenseState getLicenseState() { + return XPackPlugin.getSharedLicenseState(); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java index e5d76b9bb5570..6d09c9e67b363 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java @@ -7,9 +7,14 @@ package org.elasticsearch.xpack.inference.external.http; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; import org.apache.http.impl.nio.reactor.IOReactorConfig; +import org.apache.http.nio.conn.NoopIOSessionStrategy; +import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.nio.reactor.ConnectingIOReactor; import org.apache.http.nio.reactor.IOReactorException; import org.apache.http.pool.PoolStats; @@ -21,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import java.io.Closeable; @@ -28,11 +34,13 @@ import java.util.List; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX; public class HttpClientManager implements Closeable { private static final Logger logger = LogManager.getLogger(HttpClientManager.class); /** * The maximum number of total connections the connection pool can lease to all routes. + * The configuration applies to each instance of HTTPClientManager (max_total_connections=10 and instances=5 leads to 50 connections). * From googling around the connection pools maxTotal value should be close to the number of available threads. * * https://stackoverflow.com/questions/30989637/how-to-decide-optimal-settings-for-setmaxtotal-and-setdefaultmaxperroute @@ -47,6 +55,7 @@ public class HttpClientManager implements Closeable { /** * The max number of connections a single route can lease. + * This configuration applies to each instance of HttpClientManager. 
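+ * (For example, mirroring the arithmetic above: max_route_connections=10 and instances=2 would allow up to 20 connections to a single route.)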
*/ public static final Setting<Integer> MAX_ROUTE_CONNECTIONS = Setting.intSetting( "xpack.inference.http.max_route_connections", @@ -98,6 +107,22 @@ public static HttpClientManager create( return new HttpClientManager(settings, connectionManager, threadPool, clusterService, throttlerManager); } + public static HttpClientManager create( + Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + ThrottlerManager throttlerManager, + SSLService sslService + ) { + // Set the sslStrategy to ensure an encrypted connection, as Elastic Inference Service requires it. + SSLIOSessionStrategy sslioSessionStrategy = sslService.sslIOSessionStrategy( + sslService.getSSLConfiguration(ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX) + ); + + PoolingNHttpClientConnectionManager connectionManager = createConnectionManager(sslioSessionStrategy); + return new HttpClientManager(settings, connectionManager, threadPool, clusterService, throttlerManager); + } + // Default for testing HttpClientManager( Settings settings, @@ -121,6 +146,25 @@ public static HttpClientManager create( this.addSettingsUpdateConsumers(clusterService); } + private static PoolingNHttpClientConnectionManager createConnectionManager(SSLIOSessionStrategy sslStrategy) { + ConnectingIOReactor ioReactor; + try { + var configBuilder = IOReactorConfig.custom().setSoKeepAlive(true); + ioReactor = new DefaultConnectingIOReactor(configBuilder.build()); + } catch (IOReactorException e) { + var message = "Failed to initialize HTTP client manager with SSL."; + logger.error(message, e); + throw new ElasticsearchException(message, e); + } + + Registry<SchemeIOSessionStrategy> registry = RegistryBuilder.<SchemeIOSessionStrategy>create() + .register("http", NoopIOSessionStrategy.INSTANCE) + .register("https", sslStrategy) + .build(); + + return new PoolingNHttpClientConnectionManager(ioReactor, registry); + } + private static PoolingNHttpClientConnectionManager createConnectionManager() { ConnectingIOReactor ioReactor; try { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index 0d2cae9335b74..115d0d7947de2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -104,8 +104,6 @@ * A {@link FieldMapper} for semantic text fields. 
*/ public class SemanticTextFieldMapper extends FieldMapper implements InferenceFieldMapper { - public static final NodeFeature SEMANTIC_TEXT_SEARCH_INFERENCE_ID = new NodeFeature("semantic_text.search_inference_id", true); - public static final NodeFeature SEMANTIC_TEXT_DEFAULT_ELSER_2 = new NodeFeature("semantic_text.default_elser_2", true); public static final NodeFeature SEMANTIC_TEXT_IN_OBJECT_FIELD_FIX = new NodeFeature("semantic_text.in_object_field_fix"); public static final NodeFeature SEMANTIC_TEXT_SINGLE_FIELD_UPDATE_FIX = new NodeFeature("semantic_text.single_field_update_fix"); public static final NodeFeature SEMANTIC_TEXT_DELETE_FIX = new NodeFeature("semantic_text.delete_fix"); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java index dbf41cc8b25ed..285739fe0936f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -51,9 +50,6 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; public class SemanticQueryBuilder extends AbstractQueryBuilder { - // **** THE semantic_text.inner_hits CLUSTER FEATURE IS DEFUNCT, NEVER USE IT **** - public static final NodeFeature SEMANTIC_TEXT_INNER_HITS = new NodeFeature("semantic_text.inner_hits", true); - public static final String NAME = "semantic"; private static final ParseField FIELD_FIELD = new ParseField("field"); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java index 503000c31f7e7..bdc833f0c3f40 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.inference.rank.random; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -31,8 +30,6 @@ */ public class RandomRankRetrieverBuilder extends RetrieverBuilder { - public static final NodeFeature RANDOM_RERANKER_RETRIEVER_SUPPORTED = new NodeFeature("random_reranker_retriever_supported", true); - public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); @@ -63,9 +60,6 @@ public class RandomRankRetrieverBuilder extends RetrieverBuilder { } public static 
RandomRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { - if (context.clusterSupportsFeature(RANDOM_RERANKER_RETRIEVER_SUPPORTED) == false) { - throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + RandomRankBuilder.NAME + "]"); - } return PARSER.apply(parser, context); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index bd4c9c1a8be42..42248d246d3da 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -8,10 +8,10 @@ package org.elasticsearch.xpack.inference.rank.textsimilarity; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.List; @@ -36,17 +35,8 @@ */ public class TextSimilarityRankRetrieverBuilder extends CompoundRetrieverBuilder { - public static final NodeFeature TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED = new NodeFeature( - "text_similarity_reranker_retriever_supported", - true - ); - public static final NodeFeature TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED = new NodeFeature( - "text_similarity_reranker_retriever_composition_supported", - true - ); public static final NodeFeature TEXT_SIMILARITY_RERANKER_ALIAS_HANDLING_FIX = new NodeFeature( - "text_similarity_reranker_alias_handling_fix", - true + "text_similarity_reranker_alias_handling_fix" ); public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); @@ -78,17 +68,12 @@ public class TextSimilarityRankRetrieverBuilder extends CompoundRetrieverBuilder RetrieverBuilder.declareBaseParserFields(TextSimilarityRankBuilder.NAME, PARSER); } - public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) - throws IOException { - if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED) == false) { - throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); - } - if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED) == false) { - throw new IllegalArgumentException( - "[text_similarity_reranker] retriever composition feature is not supported by all nodes in the cluster" - ); - } - if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { + public static TextSimilarityRankRetrieverBuilder fromXContent( + XContentParser parser, + RetrieverParserContext context, + XPackLicenseState 
licenseState + ) throws IOException { + if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(licenseState) == false) { throw LicenseUtils.newComplianceException(TextSimilarityRankBuilder.NAME); } return PARSER.apply(parser, context); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java index c46f211bb26af..57c06df8d8dfe 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java @@ -30,12 +30,15 @@ public final class Paths { + "}/{" + INFERENCE_ID + "}/_stream"; - static final String UNIFIED_INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/_unified"; + + public static final String UNIFIED_SUFFIX = "_unified"; + static final String UNIFIED_INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/" + UNIFIED_SUFFIX; static final String UNIFIED_TASK_TYPE_INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/{" + INFERENCE_ID - + "}/_unified"; + + "}/" + + UNIFIED_SUFFIX; private Paths() { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index 7ea68b2c9bf1e..ac6e57d31b740 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -73,7 +73,7 @@ public void infer( private static InferenceInputs createInput(Model model, List<String> input, @Nullable String query, boolean stream) { return switch (model.getTaskType()) { - case COMPLETION -> new ChatCompletionInput(input, stream); + case COMPLETION, CHAT_COMPLETION -> new ChatCompletionInput(input, stream); case RERANK -> new QueryAndDocsInputs(query, input, stream); case TEXT_EMBEDDING, SPARSE_EMBEDDING -> new DocumentsOnlyInput(input, stream); default -> throw new ElasticsearchStatusException( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index 7d05bac363fb1..1ddae3cc8df95 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -42,6 +42,7 @@ import static org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings.ENABLED; import static org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings.MAX_NUMBER_OF_ALLOCATIONS; import static org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings.MIN_NUMBER_OF_ALLOCATIONS; +import static org.elasticsearch.xpack.inference.rest.Paths.UNIFIED_SUFFIX; import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; public final class ServiceUtils { @@ -780,5 +781,24 @@ public static void throwUnsupportedUnifiedCompletionOperation(String serviceName throw new UnsupportedOperationException(Strings.format("The %s service does not support unified completion", serviceName)); } + public static String unsupportedTaskTypeForInference(Model model, EnumSet<TaskType> 
supportedTaskTypes) { + return Strings.format( + "Inference entity [%s] does not support task type [%s] for inference, the task type must be one of %s.", + model.getInferenceEntityId(), + model.getTaskType(), + supportedTaskTypes + ); + } + + public static String useChatCompletionUrlMessage(Model model) { + return org.elasticsearch.common.Strings.format( + "The task type for the inference entity is %s, please use the _inference/%s/%s/%s URL.", + model.getTaskType(), + model.getTaskType(), + model.getInferenceEntityId(), + UNIFIED_SUFFIX + ); + } + private ServiceUtils() {} } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java index 48416faac6a06..cb554cf288121 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java @@ -27,6 +27,7 @@ import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceEmbeddingSparse; import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceError; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; @@ -41,6 +42,7 @@ import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.elastic.completion.ElasticInferenceServiceCompletionModel; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import org.elasticsearch.xpack.inference.telemetry.TraceContext; @@ -61,6 +63,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.useChatCompletionUrlMessage; public class ElasticInferenceService extends SenderService { @@ -69,8 +72,17 @@ public class ElasticInferenceService extends SenderService { private final ElasticInferenceServiceComponents elasticInferenceServiceComponents; - private static final EnumSet<TaskType> supportedTaskTypes = EnumSet.of(TaskType.SPARSE_EMBEDDING, TaskType.COMPLETION); + // The task types exposed via the _inference/_services API + private static final EnumSet<TaskType> SUPPORTED_TASK_TYPES_FOR_SERVICES_API = EnumSet.of( + TaskType.SPARSE_EMBEDDING, + TaskType.CHAT_COMPLETION + ); private static final String SERVICE_NAME = "Elastic"; + /** + * The task types that the {@link InferenceAction.Request} can accept. 
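+ * CHAT_COMPLETION is deliberately absent: doInfer below rejects it with a BAD_REQUEST error that points callers at the _unified chat completion URL instead.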
+ */ + private static final EnumSet<TaskType> SUPPORTED_INFERENCE_ACTION_TASK_TYPES = EnumSet.of(TaskType.SPARSE_EMBEDDING); public ElasticInferenceService( HttpRequestSender.Factory factory, @@ -83,7 +94,7 @@ public ElasticInferenceService( @Override public Set<TaskType> supportedStreamingTasks() { - return COMPLETION_ONLY; + return EnumSet.of(TaskType.CHAT_COMPLETION, TaskType.ANY); } @Override @@ -129,6 +140,16 @@ protected void doInfer( TimeValue timeout, ActionListener<InferenceServiceResults> listener ) { + if (SUPPORTED_INFERENCE_ACTION_TASK_TYPES.contains(model.getTaskType()) == false) { + var responseString = ServiceUtils.unsupportedTaskTypeForInference(model, SUPPORTED_INFERENCE_ACTION_TASK_TYPES); + + if (model.getTaskType() == TaskType.CHAT_COMPLETION) { + responseString = responseString + " " + useChatCompletionUrlMessage(model); + } + listener.onFailure(new ElasticsearchStatusException(responseString, RestStatus.BAD_REQUEST)); + return; + } + if (model instanceof ElasticInferenceServiceExecutableActionModel == false) { listener.onFailure(createInvalidModelException(model)); return; @@ -207,7 +227,7 @@ public InferenceServiceConfiguration getConfiguration() { @Override public EnumSet<TaskType> supportedTaskTypes() { - return supportedTaskTypes; + return SUPPORTED_TASK_TYPES_FOR_SERVICES_API; } private static ElasticInferenceServiceModel createModel( @@ -383,7 +403,7 @@ public static InferenceServiceConfiguration get() { return new InferenceServiceConfiguration.Builder().setService(NAME) .setName(SERVICE_NAME) - .setTaskTypes(supportedTaskTypes) + .setTaskTypes(SUPPORTED_TASK_TYPES_FOR_SERVICES_API) .setConfigurations(configurationMap) .build(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java index 324c20d0e48bf..530efee4a3d45 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java @@ -11,7 +11,8 @@ /** * Elastic Inference Service (EIS) feature flag. When the feature is complete, this flag will be removed. - * Enable feature via JVM option: `-Des.eis_feature_flag_enabled=true`. + * Enable feature via JVM option: `-Des.elastic_inference_service_feature_flag_enabled=true`. 
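+ * For example, one way to pass the option to a local node: ES_JAVA_OPTS="-Des.elastic_inference_service_feature_flag_enabled=true" ./bin/elasticsearch (or an equivalent line in a config/jvm.options.d/ file).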
*/ public class ElasticInferenceServiceFeature { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java index 5146cec1552af..0c1a032dc8926 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java @@ -9,7 +9,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import java.util.ArrayList; import java.util.List; /** @@ -21,6 +23,8 @@ public class ElasticInferenceServiceSettings { @Deprecated static final Setting<String> EIS_GATEWAY_URL = Setting.simpleString("xpack.inference.eis.gateway.url", Setting.Property.NodeScope); + public static final String ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX = "xpack.inference.elastic.http.ssl."; + static final Setting<String> ELASTIC_INFERENCE_SERVICE_URL = Setting.simpleString( "xpack.inference.elastic.url", Setting.Property.NodeScope @@ -35,11 +39,27 @@ public class ElasticInferenceServiceSettings { public ElasticInferenceServiceSettings(Settings settings) { eisGatewayUrl = EIS_GATEWAY_URL.get(settings); elasticInferenceServiceUrl = ELASTIC_INFERENCE_SERVICE_URL.get(settings); - } + public static final SSLConfigurationSettings ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_SETTINGS = SSLConfigurationSettings.withPrefix( + ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX, + false + ); + + public static final Setting<Boolean> ELASTIC_INFERENCE_SERVICE_SSL_ENABLED = Setting.boolSetting( + ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX + "enabled", + true, + Setting.Property.NodeScope + ); + public static List<Setting<?>> getSettingsDefinitions() { - return List.of(EIS_GATEWAY_URL, ELASTIC_INFERENCE_SERVICE_URL); + ArrayList<Setting<?>> settings = new ArrayList<>(); + settings.add(EIS_GATEWAY_URL); + settings.add(ELASTIC_INFERENCE_SERVICE_URL); + settings.add(ELASTIC_INFERENCE_SERVICE_SSL_ENABLED); + settings.addAll(ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_SETTINGS.getEnabledSettings()); + + return settings; } @Deprecated diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index ba9dea8ace8ee..3efd7c44c3e97 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -27,6 +27,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.SenderExecutableAction; @@ -63,6 +64,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static 
org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.useChatCompletionUrlMessage; import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; @@ -70,7 +72,16 @@ public class OpenAiService extends SenderService { public static final String NAME = "openai"; private static final String SERVICE_NAME = "OpenAI"; - private static final EnumSet<TaskType> supportedTaskTypes = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); + // The task types exposed via the _inference/_services API + private static final EnumSet<TaskType> SUPPORTED_TASK_TYPES_FOR_SERVICES_API = EnumSet.of( + TaskType.TEXT_EMBEDDING, + TaskType.COMPLETION, + TaskType.CHAT_COMPLETION + ); + /** + * The task types that the {@link InferenceAction.Request} can accept. + */ + private static final EnumSet<TaskType> SUPPORTED_INFERENCE_ACTION_TASK_TYPES = EnumSet.of(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION); public OpenAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); @@ -164,7 +175,7 @@ private static OpenAiModel createModel( secretSettings, context ); - case COMPLETION -> new OpenAiChatCompletionModel( + case COMPLETION, CHAT_COMPLETION -> new OpenAiChatCompletionModel( inferenceEntityId, taskType, NAME, @@ -236,7 +247,7 @@ public InferenceServiceConfiguration getConfiguration() { @Override public EnumSet<TaskType> supportedTaskTypes() { - return supportedTaskTypes; + return SUPPORTED_TASK_TYPES_FOR_SERVICES_API; } @Override @@ -248,6 +259,16 @@ public void doInfer( TimeValue timeout, ActionListener<InferenceServiceResults> listener ) { + if (SUPPORTED_INFERENCE_ACTION_TASK_TYPES.contains(model.getTaskType()) == false) { + var responseString = ServiceUtils.unsupportedTaskTypeForInference(model, SUPPORTED_INFERENCE_ACTION_TASK_TYPES); + + if (model.getTaskType() == TaskType.CHAT_COMPLETION) { + responseString = responseString + " " + useChatCompletionUrlMessage(model); + } + listener.onFailure(new ElasticsearchStatusException(responseString, RestStatus.BAD_REQUEST)); + return; + } + if (model instanceof OpenAiModel == false) { listener.onFailure(createInvalidModelException(model)); return; @@ -356,7 +376,7 @@ public TransportVersion getMinimalSupportedVersion() { @Override public Set<TaskType> supportedStreamingTasks() { - return COMPLETION_ONLY; + return EnumSet.of(TaskType.COMPLETION, TaskType.CHAT_COMPLETION, TaskType.ANY); } /** @@ -444,7 +464,7 @@ public static InferenceServiceConfiguration get() { return new InferenceServiceConfiguration.Builder().setService(NAME) .setName(SERVICE_NAME) - .setTaskTypes(supportedTaskTypes) + .setTaskTypes(SUPPORTED_TASK_TYPES_FOR_SERVICES_API) .setConfigurations(configurationMap) .build(); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java index a595134ecd548..e49d98d48eb0c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java @@ -14,8 +14,7 @@ import org.elasticsearch.index.IndexService; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.hamcrest.Matchers; import java.util.Arrays; @@ -29,7 +28,7 @@ public class SemanticTextClusterMetadataTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(InferencePlugin.class, LocalStateCompositeXPackPlugin.class); + return List.of(LocalStateInferencePlugin.class); } public void testCreateIndexWithSemanticTextField() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java new file mode 100644 index 0000000000000..d1db5b8b12cc6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings; +import org.junit.After; +import org.junit.Before; + +import static org.hamcrest.Matchers.is; + +public class InferencePluginTests extends ESTestCase { + private InferencePlugin inferencePlugin; + + private Boolean elasticInferenceServiceEnabled = true; + + private void setElasticInferenceServiceEnabled(Boolean elasticInferenceServiceEnabled) { + this.elasticInferenceServiceEnabled = elasticInferenceServiceEnabled; + } + + @Before + public void setUp() throws Exception { + super.setUp(); + + Settings settings = Settings.builder().build(); + inferencePlugin = new InferencePlugin(settings) { + @Override + protected Boolean isElasticInferenceServiceEnabled() { + return elasticInferenceServiceEnabled; + } + }; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + public void testElasticInferenceServiceSettingsPresent() throws Exception { + setElasticInferenceServiceEnabled(true); // enable elastic inference service + boolean anyMatch = inferencePlugin.getSettings() + .stream() + .map(Setting::getKey) + .anyMatch(key -> key.startsWith(ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX)); + + assertThat("xpack.inference.elastic settings are present", anyMatch, is(true)); + } + + public void testElasticInferenceServiceSettingsNotPresent() throws Exception { + setElasticInferenceServiceEnabled(false); // disable elastic inference service + boolean noneMatch = inferencePlugin.getSettings() + .stream() + .map(Setting::getKey) + .noneMatch(key -> key.startsWith(ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX)); + + assertThat("xpack.inference.elastic settings are not present", noneMatch, is(true)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java 
new file mode 100644 index 0000000000000..68ea175bd9870 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference; + +import org.elasticsearch.action.support.MappedActionFilter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.inference.InferenceServiceExtension; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; +import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; + +import java.nio.file.Path; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static java.util.stream.Collectors.toList; + +public class LocalStateInferencePlugin extends LocalStateCompositeXPackPlugin { + private final InferencePlugin inferencePlugin; + + public LocalStateInferencePlugin(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + LocalStateInferencePlugin thisVar = this; + this.inferencePlugin = new InferencePlugin(settings) { + @Override + protected SSLService getSslService() { + return thisVar.getSslService(); + } + + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + + @Override + public List getInferenceServiceFactories() { + return List.of( + TestSparseInferenceServiceExtension.TestInferenceService::new, + TestDenseInferenceServiceExtension.TestInferenceService::new + ); + } + }; + plugins.add(inferencePlugin); + } + + @Override + public List> getRetrievers() { + return this.filterPlugins(SearchPlugin.class).stream().flatMap(p -> p.getRetrievers().stream()).collect(toList()); + } + + @Override + public Map getMappers() { + return inferencePlugin.getMappers(); + } + + @Override + public Collection getMappedActionFilters() { + return inferencePlugin.getMappedActionFilters(); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index e02ac7b8853ad..1260b89034e6b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; -import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; @@ -149,33 +148,24 @@ private static void blockingCall( latch.await(); } - public static class TestInferencePlugin extends InferencePlugin { - public TestInferencePlugin(Settings settings) { - super(settings); - } - - @Override - public List getInferenceServiceFactories() { - 
return List.of( - TestSparseInferenceServiceExtension.TestInferenceService::new, - TestDenseInferenceServiceExtension.TestInferenceService::new - ); - } - } - - public static Model getInvalidModel(String inferenceEntityId, String serviceName) { + public static Model getInvalidModel(String inferenceEntityId, String serviceName, TaskType taskType) { var mockConfigs = mock(ModelConfigurations.class); when(mockConfigs.getInferenceEntityId()).thenReturn(inferenceEntityId); when(mockConfigs.getService()).thenReturn(serviceName); - when(mockConfigs.getTaskType()).thenReturn(TaskType.TEXT_EMBEDDING); + when(mockConfigs.getTaskType()).thenReturn(taskType); var mockModel = mock(Model.class); + when(mockModel.getInferenceEntityId()).thenReturn(inferenceEntityId); when(mockModel.getConfigurations()).thenReturn(mockConfigs); - when(mockModel.getTaskType()).thenReturn(TaskType.TEXT_EMBEDDING); + when(mockModel.getTaskType()).thenReturn(taskType); return mockModel; } + public static Model getInvalidModel(String inferenceEntityId, String serviceName) { + return getInvalidModel(inferenceEntityId, serviceName, TaskType.TEXT_EMBEDDING); + } + public static SimilarityMeasure randomSimilarityMeasure() { return randomFrom(SimilarityMeasure.values()); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index 0432a2ff3fc9e..1fca17f77ad9a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -31,11 +31,13 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.inference.ChunkedInference; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.UnparsedModel; import org.elasticsearch.rest.RestStatus; @@ -582,7 +584,7 @@ private static class StaticModel extends TestModel { } public static StaticModel createRandomInstance() { - TestModel testModel = TestModel.createRandomInstance(); + TestModel testModel = randomModel(randomFrom(TaskType.TEXT_EMBEDDING, TaskType.SPARSE_EMBEDDING)); return new StaticModel( testModel.getInferenceEntityId(), testModel.getTaskType(), @@ -605,4 +607,18 @@ boolean hasResult(String text) { return resultMap.containsKey(text); } } + + private static TestModel randomModel(TaskType taskType) { + var dimensions = taskType == TaskType.TEXT_EMBEDDING ? randomIntBetween(2, 64) : null; + var similarity = taskType == TaskType.TEXT_EMBEDDING ? randomFrom(SimilarityMeasure.values()) : null; + var elementType = taskType == TaskType.TEXT_EMBEDDING ? 
DenseVectorFieldMapper.ElementType.FLOAT : null; + return new TestModel( + randomAlphaOfLength(4), + taskType, + randomAlphaOfLength(10), + new TestModel.TestServiceSettings(randomAlphaOfLength(4), dimensions, similarity, elementType), + new TestModel.TestTaskSettings(randomInt(3)), + new TestModel.TestSecretSettings(randomAlphaOfLength(4)) + ); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapperTests.java similarity index 97% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapperTests.java index 8fcc0df0093ce..57d71a48a4aeb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapperTests.java @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.Collections; -public class SemanticInferenceMetadataFieldMapperTests extends MapperServiceTestCase { +public class SemanticInferenceMetadataFieldsMapperTests extends MapperServiceTestCase { @Override protected Collection getPlugins() { return Collections.singletonList(new InferencePlugin(Settings.EMPTY)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsRecoveryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsRecoveryTests.java new file mode 100644 index 0000000000000..2fa6d520fcc6f --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsRecoveryTests.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mapper; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineTestCase; +import org.elasticsearch.index.engine.LuceneChangesSnapshot; +import org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshot; +import org.elasticsearch.index.engine.SearchBasedChangesSnapshot; +import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.inference.ChunkedInference; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.model.TestModel; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomChunkedInferenceEmbeddingByte; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomChunkedInferenceEmbeddingSparse; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.semanticTextFieldFromChunkedInferenceResults; +import static org.hamcrest.Matchers.equalTo; + +public class SemanticInferenceMetadataFieldsRecoveryTests extends EngineTestCase { + private final Model model1; + private final Model model2; + private final boolean useSynthetic; + private final boolean useIncludesExcludes; + + public SemanticInferenceMetadataFieldsRecoveryTests(boolean useSynthetic, boolean useIncludesExcludes) { + this.model1 = randomModel(TaskType.TEXT_EMBEDDING); + this.model2 = randomModel(TaskType.SPARSE_EMBEDDING); + this.useSynthetic = useSynthetic; + this.useIncludesExcludes = useIncludesExcludes; + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return List.of(new Object[] { false, false }, new Object[] { false, true }, new Object[] { true, false }); + } + + @Override + protected List extraMappers() { + return List.of(new InferencePlugin(Settings.EMPTY)); + } + + @Override + protected Settings indexSettings() { + var builder = Settings.builder() + .put(super.indexSettings()) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), false); + if (useSynthetic) { + builder.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name()); + builder.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true); + } + return 
builder.build(); + } + + @Override + protected String defaultMapping() { + XContentBuilder builder = null; + try { + builder = JsonXContent.contentBuilder().startObject(); + if (useIncludesExcludes) { + builder.startObject(SourceFieldMapper.NAME).array("excludes", "field").endObject(); + } + builder.field("dynamic", false); + builder.startObject("properties"); + + builder.startObject("field"); + builder.field("type", "keyword"); + builder.endObject(); + + builder.startObject("semantic_1"); + builder.field("type", "semantic_text"); + builder.field("inference_id", model1.getInferenceEntityId()); + builder.startObject("model_settings"); + builder.field("task_type", model1.getTaskType().name()); + builder.field("dimensions", model1.getServiceSettings().dimensions()); + builder.field("similarity", model1.getServiceSettings().similarity().name()); + builder.field("element_type", model1.getServiceSettings().elementType().name()); + builder.endObject(); + builder.endObject(); + + builder.startObject("semantic_2"); + builder.field("type", "semantic_text"); + builder.field("inference_id", model2.getInferenceEntityId()); + builder.startObject("model_settings"); + builder.field("task_type", model2.getTaskType().name()); + builder.endObject(); + builder.endObject(); + + builder.endObject(); + builder.endObject(); + return BytesReference.bytes(builder).utf8ToString(); + } catch (IOException exc) { + throw new RuntimeException(exc); + } + } + + public void testSnapshotRecovery() throws IOException { + List expectedOperations = new ArrayList<>(); + int size = randomIntBetween(10, 50); + for (int i = 0; i < size; i++) { + var source = randomSource(); + var sourceToParse = new SourceToParse(Integer.toString(i), source, XContentType.JSON, null); + var doc = mapperService.documentMapper().parse(sourceToParse); + assertNull(doc.dynamicMappingsUpdate()); + if (useSynthetic) { + assertNull(doc.rootDoc().getField(SourceFieldMapper.RECOVERY_SOURCE_NAME)); + assertNotNull(doc.rootDoc().getField(SourceFieldMapper.RECOVERY_SOURCE_SIZE_NAME)); + } else { + if (useIncludesExcludes) { + assertNotNull(doc.rootDoc().getField(SourceFieldMapper.RECOVERY_SOURCE_NAME)); + var originalSource = new BytesArray(doc.rootDoc().getField(SourceFieldMapper.RECOVERY_SOURCE_NAME).binaryValue()); + var map = XContentHelper.convertToMap(originalSource, false, XContentType.JSON); + assertThat(map.v2().size(), equalTo(1)); + assertNull(map.v2().remove(InferenceMetadataFieldsMapper.NAME)); + } else { + assertNull(doc.rootDoc().getField(SourceFieldMapper.RECOVERY_SOURCE_NAME)); + } + } + var op = indexForDoc(doc); + expectedOperations.add(op); + engine.index(op); + if (frequently()) { + engine.flush(); + } + } + engine.flush(); + + var searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try ( + var snapshot = newRandomSnapshot( + engine.config().getMapperService(), + searcher, + SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE, + 0, + size - 1, + true, + randomBoolean(), + randomBoolean(), + IndexVersion.current() + ) + ) { + for (int i = 0; i < size; i++) { + var op = snapshot.next(); + assertThat(op.opType(), equalTo(Translog.Operation.Type.INDEX)); + Translog.Index indexOp = (Translog.Index) op; + assertThat(indexOp.id(), equalTo(expectedOperations.get(i).id())); + assertThat(indexOp.routing(), equalTo(expectedOperations.get(i).routing())); + assertToXContentEquivalent(indexOp.source(), expectedOperations.get(i).source(), XContentType.JSON); + } + assertNull(snapshot.next()); + } + } + + private Translog.Snapshot 
newRandomSnapshot( + MapperService mapperService, + Engine.Searcher engineSearcher, + int searchBatchSize, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean singleConsumer, + boolean accessStats, + IndexVersion indexVersionCreated + ) throws IOException { + if (useSynthetic) { + return new LuceneSyntheticSourceChangesSnapshot( + mapperService, + engineSearcher, + searchBatchSize, + randomLongBetween(0, ByteSizeValue.ofBytes(Integer.MAX_VALUE).getBytes()), + fromSeqNo, + toSeqNo, + requiredFullRange, + accessStats, + indexVersionCreated + ); + } else { + return new LuceneChangesSnapshot( + mapperService, + engineSearcher, + searchBatchSize, + fromSeqNo, + toSeqNo, + requiredFullRange, + singleConsumer, + accessStats, + indexVersionCreated + ); + } + } + + private static Model randomModel(TaskType taskType) { + var dimensions = taskType == TaskType.TEXT_EMBEDDING ? randomIntBetween(2, 64) : null; + var similarity = taskType == TaskType.TEXT_EMBEDDING ? randomFrom(SimilarityMeasure.values()) : null; + var elementType = taskType == TaskType.TEXT_EMBEDDING ? DenseVectorFieldMapper.ElementType.BYTE : null; + return new TestModel( + randomAlphaOfLength(4), + taskType, + randomAlphaOfLength(10), + new TestModel.TestServiceSettings(randomAlphaOfLength(4), dimensions, similarity, elementType), + new TestModel.TestTaskSettings(randomInt(3)), + new TestModel.TestSecretSettings(randomAlphaOfLength(4)) + ); + } + + private BytesReference randomSource() throws IOException { + var builder = JsonXContent.contentBuilder().startObject(); + builder.field("field", randomAlphaOfLengthBetween(10, 30)); + if (rarely()) { + return BytesReference.bytes(builder.endObject()); + } + SemanticTextFieldMapperTests.addSemanticTextInferenceResults( + false, + builder, + List.of( + randomSemanticText(false, "semantic_2", model2, randomInputs(), XContentType.JSON), + randomSemanticText(false, "semantic_1", model1, randomInputs(), XContentType.JSON) + ) + ); + builder.endObject(); + return BytesReference.bytes(builder); + } + + private static SemanticTextField randomSemanticText( + boolean useLegacyFormat, + String fieldName, + Model model, + List inputs, + XContentType contentType + ) throws IOException { + ChunkedInference results = switch (model.getTaskType()) { + case TEXT_EMBEDDING -> switch (model.getServiceSettings().elementType()) { + case BYTE -> randomChunkedInferenceEmbeddingByte(model, inputs); + default -> throw new AssertionError("invalid element type: " + model.getServiceSettings().elementType().name()); + }; + case SPARSE_EMBEDDING -> randomChunkedInferenceEmbeddingSparse(inputs, false); + default -> throw new AssertionError("invalid task type: " + model.getTaskType().name()); + }; + return semanticTextFieldFromChunkedInferenceResults(useLegacyFormat, fieldName, model, inputs, results, contentType); + } + + private static List randomInputs() { + int size = randomIntBetween(1, 5); + List resp = new ArrayList<>(); + for (int i = 0; i < size; i++) { + resp.add(randomAlphaOfLengthBetween(10, 50)); + } + return resp; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index b056f5880b21a..881bde41a30c5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -115,7 +115,7 @@ private MapperService createMapperService(XContentBuilder mappings, boolean useL var settings = Settings.builder() .put( IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), - SemanticInferenceMetadataFieldMapperTests.getRandomCompatibleIndexVersion(useLegacyFormat) + SemanticInferenceMetadataFieldsMapperTests.getRandomCompatibleIndexVersion(useLegacyFormat) ) .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), useLegacyFormat) .build(); @@ -920,7 +920,7 @@ private static void addSemanticTextMapping( mappingBuilder.endObject(); } - private static void addSemanticTextInferenceResults( + public static void addSemanticTextInferenceResults( boolean useLegacyFormat, XContentBuilder sourceBuilder, List semanticTextInferenceResults diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java index 29ca71d38e1b2..6a25ed506c2a3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceEmbeddingByte; import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceEmbeddingFloat; import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceEmbeddingSparse; import org.elasticsearch.xpack.core.ml.search.WeightedToken; @@ -183,12 +184,26 @@ public void testModelSettingsValidation() { assertThat(ex.getMessage(), containsString("required [element_type] field is missing")); } + public static ChunkedInferenceEmbeddingByte randomChunkedInferenceEmbeddingByte(Model model, List inputs) { + List chunks = new ArrayList<>(); + for (String input : inputs) { + byte[] values = new byte[model.getServiceSettings().dimensions()]; + for (int j = 0; j < values.length; j++) { + values[j] = randomByte(); + } + chunks.add( + new ChunkedInferenceEmbeddingByte.ByteEmbeddingChunk(values, input, new ChunkedInference.TextOffset(0, input.length())) + ); + } + return new ChunkedInferenceEmbeddingByte(chunks); + } + public static ChunkedInferenceEmbeddingFloat randomChunkedInferenceEmbeddingFloat(Model model, List inputs) { List chunks = new ArrayList<>(); for (String input : inputs) { float[] values = new float[model.getServiceSettings().dimensions()]; for (int j = 0; j < values.length; j++) { - values[j] = (float) randomDouble(); + values[j] = randomFloat(); } chunks.add( new ChunkedInferenceEmbeddingFloat.FloatEmbeddingChunk(values, input, new ChunkedInference.TextOffset(0, input.length())) @@ -198,11 +213,15 @@ public static ChunkedInferenceEmbeddingFloat randomChunkedInferenceEmbeddingFloa } public static ChunkedInferenceEmbeddingSparse randomChunkedInferenceEmbeddingSparse(List inputs) { + return randomChunkedInferenceEmbeddingSparse(inputs, true); + } + + public static ChunkedInferenceEmbeddingSparse randomChunkedInferenceEmbeddingSparse(List inputs, boolean withFloats) { List chunks = new ArrayList<>(); for (String input : inputs) { var tokens = new ArrayList(); for 
(var token : input.split("\\s+")) { - tokens.add(new WeightedToken(token, randomFloat())); + tokens.add(new WeightedToken(token, withFloats ? randomFloat() : randomIntBetween(1, 255))); } chunks.add( new ChunkedInferenceEmbeddingSparse.SparseEmbeddingChunk(tokens, input, new ChunkedInference.TextOffset(0, input.length())) @@ -219,7 +238,10 @@ public static SemanticTextField randomSemanticText( XContentType contentType ) throws IOException { ChunkedInference results = switch (model.getTaskType()) { - case TEXT_EMBEDDING -> randomChunkedInferenceEmbeddingFloat(model, inputs); + case TEXT_EMBEDDING -> switch (model.getServiceSettings().elementType()) { + case FLOAT -> randomChunkedInferenceEmbeddingFloat(model, inputs); + case BIT, BYTE -> randomChunkedInferenceEmbeddingByte(model, inputs); + }; case SPARSE_EMBEDDING -> randomChunkedInferenceEmbeddingSparse(inputs); default -> throw new AssertionError("invalid task type: " + model.getTaskType().name()); }; @@ -363,8 +385,8 @@ private static List validateAndGetMatchedTextForField( * the matched text corresponds to one complete input value (i.e. one input value -> one chunk) to calculate the offset values. * * @param useLegacyFormat Whether the old format should be used - * @param chunk The chunk to get/calculate offset values for - * @param matchedText The matched text to calculate offset values for + * @param chunk The chunk to get/calculate offset values for + * @param matchedText The matched text to calculate offset values for * @return A {@link ChunkedInference.TextOffset} instance with valid offset values */ private static ChunkedInference.TextOffset createOffset(boolean useLegacyFormat, SemanticTextField.Chunk chunk, String matchedText) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java index 0025b3a53a69f..24183b21f73e7 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.index.mapper.NonDynamicFieldMapperTests; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; import org.junit.Before; @@ -27,7 +27,7 @@ public void setup() throws Exception { @Override protected Collection> getPlugins() { - return List.of(Utils.TestInferencePlugin.class, LocalStateCompositeXPackPlugin.class); + return List.of(LocalStateInferencePlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java index deb5fb47ab939..2d91877da97ba 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java @@ -8,6 +8,7 @@ package 
org.elasticsearch.xpack.inference.rank.random; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.core.Predicates; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.TestRetrieverBuilder; @@ -51,10 +52,7 @@ protected RandomRankRetrieverBuilder createTestInstance() { protected RandomRankRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { return (RandomRankRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( parser, - new RetrieverParserContext( - new SearchUsage(), - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED - ) + new RetrieverParserContext(new SearchUsage(), Predicates.never()) ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java index 69b1e19fa91de..daed03c198e0d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java @@ -10,8 +10,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.rank.RankBuilder; import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import java.util.Collection; import java.util.List; @@ -41,7 +40,7 @@ protected RankBuilder getThrowingRankBuilder(int rankWindowSize, String rankFeat @Override protected Collection> pluginsNeeded() { - return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class, LocalStateCompositeXPackPlugin.class); + return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); } public void testQueryPhaseShardThrowingAllShardsFail() throws Exception { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java index 478f3b2f33c93..b6d455dd233ba 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -68,12 +69,7 @@ protected TextSimilarityRankRetrieverBuilder createTestInstance() { protected TextSimilarityRankRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { return (TextSimilarityRankRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( parser, - new RetrieverParserContext( - new 
SearchUsage(), - nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED - || nf == TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED - || nf == TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED - ) + new RetrieverParserContext(new SearchUsage(), Predicates.never()) ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java index 084a7f3de4a53..ba6924ba0ff3b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java @@ -24,8 +24,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.junit.Before; import java.io.IOException; @@ -47,7 +46,7 @@ protected boolean addMockHttpTransport() { @Override protected Collection> nodePlugins() { - return List.of(InferencePlugin.class, XPackPlugin.class, TextSimilarityTestPlugin.class); + return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java index a6a4ce2b2ffdf..f81f2965c392e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java @@ -19,9 +19,8 @@ import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.inference.action.InferenceAction; -import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.junit.Before; import java.util.Collection; @@ -109,7 +108,7 @@ protected InferenceAction.Request generateRequest(List docFeatures) { @Override protected Collection> getPlugins() { - return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class, LocalStateCompositeXPackPlugin.class); + return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); } @Before diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java index 8a826e99c3c04..89bf1355ee767 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java +++ 
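Both doParseInstance rewrites above collapse a hand-rolled feature predicate into Predicates.never(), i.e. a cluster-feature check that always answers false; this works because the retriever features previously listed are treated as generally available, so nothing in the parse path needs them reported anymore. A sketch of the idiom in plain java.util.function terms, where the never() helper below stands in for org.elasticsearch.core.Predicates.never():

    import java.util.function.Predicate;

    final class FeaturePredicates {
        private FeaturePredicates() {}

        // Always-false predicate: the parser context advertises no optional features,
        // which is safe once the constructs being parsed are no longer feature-gated.
        static <T> Predicate<T> never() {
            return ignored -> false;
        }
    }
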
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java @@ -319,7 +319,7 @@ public void testInfer_ThrowsErrorWhenModelIsNotAValidModel() throws IOException var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender()).thenReturn(sender); - var mockModel = getInvalidModel("model_id", "service_name"); + var mockModel = getInvalidModel("model_id", "service_name", TaskType.SPARSE_EMBEDDING); try ( var service = new ElasticInferenceService( @@ -355,6 +355,98 @@ public void testInfer_ThrowsErrorWhenModelIsNotAValidModel() throws IOException verifyNoMoreInteractions(sender); } + public void testInfer_ThrowsErrorWhenTaskTypeIsNotValid() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name", TaskType.TEXT_EMBEDDING); + + try ( + var service = new ElasticInferenceService( + factory, + createWithEmptySettings(threadPool), + new ElasticInferenceServiceComponents(null) + ) + ) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + false, + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + MatcherAssert.assertThat( + thrownException.getMessage(), + is( + "Inference entity [model_id] does not support task type [text_embedding] " + + "for inference, the task type must be one of [sparse_embedding]." + ) + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_ThrowsErrorWhenTaskTypeIsNotValid_ChatCompletion() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name", TaskType.CHAT_COMPLETION); + + try ( + var service = new ElasticInferenceService( + factory, + createWithEmptySettings(threadPool), + new ElasticInferenceServiceComponents(null) + ) + ) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + false, + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + MatcherAssert.assertThat( + thrownException.getMessage(), + is( + "Inference entity [model_id] does not support task type [chat_completion] " + + "for inference, the task type must be one of [sparse_embedding]. " + + "The task type for the inference entity is chat_completion, " + + "please use the _inference/chat_completion/model_id/_unified URL." 
+ ) + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + public void testInfer_SendsEmbeddingsRequest() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); var eisGatewayUrl = getUrl(webServer); @@ -482,7 +574,7 @@ public void testGetConfiguration() throws Exception { { "service": "elastic", "name": "Elastic", - "task_types": ["sparse_embedding" , "completion"], + "task_types": ["sparse_embedding", "chat_completion"], "configurations": { "rate_limit.requests_per_minute": { "description": "Minimize the number of rate limit errors.", diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index 678c4528a3f41..da912cd6e5d14 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -864,6 +864,86 @@ public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException verifyNoMoreInteractions(sender); } + public void testInfer_ThrowsErrorWhenTaskTypeIsNotValid() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name", TaskType.SPARSE_EMBEDDING); + + try (var service = new OpenAiService(factory, createWithEmptySettings(threadPool))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + false, + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is( + "Inference entity [model_id] does not support task type [sparse_embedding] " + + "for inference, the task type must be one of [text_embedding, completion]." + ) + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_ThrowsErrorWhenTaskTypeIsNotValid_ChatCompletion() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name", TaskType.CHAT_COMPLETION); + + try (var service = new OpenAiService(factory, createWithEmptySettings(threadPool))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + false, + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is( + "Inference entity [model_id] does not support task type [chat_completion] " + + "for inference, the task type must be one of [text_embedding, completion]. 
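The two negative tests above (and their OpenAI counterparts below) all assert the same two-part message: a generic unsupported-task-type sentence listing what the service does support, plus, for chat completion only, a hint redirecting callers to the _unified endpoint. Reconstructed as a helper purely to make the expected format explicit (the method name and signature are illustrative, not the production API):

    static String unsupportedTaskType(String entityId, String taskType, String supported, boolean isChatCompletion) {
        String message = "Inference entity [" + entityId + "] does not support task type [" + taskType
            + "] for inference, the task type must be one of [" + supported + "].";
        if (isChatCompletion) {
            // Chat completion requests must go through the unified endpoint instead.
            message += " The task type for the inference entity is " + taskType
                + ", please use the _inference/" + taskType + "/" + entityId + "/_unified URL.";
        }
        return message;
    }
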
" + + "The task type for the inference entity is chat_completion, " + + "please use the _inference/chat_completion/model_id/_unified URL." + ) + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + public void testInfer_SendsRequest() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); @@ -1661,7 +1741,7 @@ public void testGetConfiguration() throws Exception { { "service": "openai", "name": "OpenAI", - "task_types": ["text_embedding", "completion"], + "task_types": ["text_embedding", "completion", "chat_completion"], "configurations": { "api_key": { "description": "The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.", diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml index 2f3bcfae600e7..71db83e4667df 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -173,10 +173,6 @@ setup: --- "Query using a sparse embedding model via a search inference ID": - - requires: - cluster_features: "semantic_text.search_inference_id" - reason: search_inference_id introduced in 8.16.0 - - skip: features: [ "headers", "close_to" ] @@ -357,10 +353,6 @@ setup: --- "Query using a dense embedding model via a search inference ID": - - requires: - cluster_features: "semantic_text.search_inference_id" - reason: search_inference_id introduced in 8.16.0 - - skip: features: [ "headers", "close_to" ] @@ -737,10 +729,6 @@ setup: --- "Query a field with a search inference ID that uses the wrong task type": - - requires: - cluster_features: "semantic_text.search_inference_id" - reason: search_inference_id introduced in 8.16.0 - - do: indices.put_mapping: index: test-sparse-index @@ -778,10 +766,6 @@ setup: --- "Query a field with a search inference ID that uses the wrong dimension count": - - requires: - cluster_features: "semantic_text.search_inference_id" - reason: search_inference_id introduced in 8.16.0 - - do: inference.put: task_type: text_embedding @@ -835,10 +819,6 @@ setup: --- "Query a field with an invalid search inference ID": - - requires: - cluster_features: "semantic_text.search_inference_id" - reason: search_inference_id introduced in 8.16.0 - - do: indices.put_mapping: index: test-dense-index diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml index a3c55dfddf611..88569daaa6070 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -4,8 +4,6 @@ setup: - close_to - contains - requires: - cluster_features: "text_similarity_reranker_retriever_supported" - reason: semantic reranking introduced in 8.15.0 test_runner_features: "close_to" - do: diff --git 
a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java index bd8093c0a01c1..177858b84ad43 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java @@ -281,4 +281,57 @@ public void testLogsdbRouteOnSortFields() throws IOException { assertEquals("true", settings.get(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey())); assertEquals(List.of("host.name", "message"), settings.get(IndexMetadata.INDEX_ROUTING_PATH.getKey())); } + + public void testLogsdbDefaultWithRecoveryUseSyntheticSource() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["my-log-*"], + "data_stream": { + }, + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "recovery.use_synthetic_source" : "true" + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "message": { + "type": "keyword" + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/my-log-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("my-log-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertNull(settings.get("index.mapping.source.mode")); + assertEquals("true", settings.get(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey())); + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 266847209f495..6c18626edfb7a 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; @@ -69,7 +70,10 @@ public Collection createComponents(PluginServices services) { public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { logsdbIndexModeSettingsProvider.init( parameters.mapperServiceFactory(), - () -> parameters.clusterService().state().nodes().getMinSupportedIndexVersion(), + () -> IndexVersion.min( + IndexVersion.current(), + parameters.clusterService().state().nodes().getMaxDataNodeCompatibleIndexVersion() + ), DiscoveryNode.isStateless(settings) == false ); return List.of(logsdbIndexModeSettingsProvider); diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java 
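The LogsDBPlugin change above replaces the minimum supported index version with a clamped maximum: the settings provider is handed the newest index version every data node can read, capped at what the local node itself understands. Restated outside the plugin wiring, using the same calls the diff introduces:

    Supplier<IndexVersion> versionSupplier = () -> IndexVersion.min(
        IndexVersion.current(), // upper bound: never hand out a version newer than this node supports
        parameters.clusterService().state().nodes().getMaxDataNodeCompatibleIndexVersion()
    );
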
b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java index cdf02273b9df5..19f5039a1075d 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -24,7 +23,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatures; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; @@ -32,14 +30,12 @@ public class LogsDBUsageTransportAction extends XPackUsageFeatureTransportAction { private final ClusterService clusterService; - private final FeatureService featureService; private final Client client; @Inject public LogsDBUsageTransportAction( TransportService transportService, ClusterService clusterService, - FeatureService featureService, ThreadPool threadPool, ActionFilters actionFilters, Client client, @@ -54,7 +50,6 @@ public LogsDBUsageTransportAction( indexNameExpressionResolver ); this.clusterService = clusterService; - this.featureService = featureService; this.client = client; } @@ -77,31 +72,23 @@ protected void masterOperation( } final boolean enabled = LogsDBPlugin.CLUSTER_LOGSDB_ENABLED.get(clusterService.getSettings()); final boolean hasCustomCutoffDate = System.getProperty(SyntheticSourceLicenseService.CUTOFF_DATE_SYS_PROP_NAME) != null; - if (featureService.clusterHasFeature(state, XPackFeatures.LOGSDB_TELMETRY_STATS)) { - final DiscoveryNode[] nodes = state.nodes().getDataNodes().values().toArray(DiscoveryNode[]::new); - final var statsRequest = new IndexModeStatsActionType.StatsRequest(nodes); - final int finalNumIndices = numIndices; - final int finalNumIndicesWithSyntheticSources = numIndicesWithSyntheticSources; - client.execute(IndexModeStatsActionType.TYPE, statsRequest, listener.map(statsResponse -> { - final var indexStats = statsResponse.stats().get(IndexMode.LOGSDB); - return new XPackUsageFeatureResponse( - new LogsDBFeatureSetUsage( - true, - enabled, - finalNumIndices, - finalNumIndicesWithSyntheticSources, - indexStats.numDocs(), - indexStats.numBytes(), - hasCustomCutoffDate - ) - ); - })); - } else { - listener.onResponse( - new XPackUsageFeatureResponse( - new LogsDBFeatureSetUsage(true, enabled, numIndices, numIndicesWithSyntheticSources, 0L, 0L, hasCustomCutoffDate) + final DiscoveryNode[] nodes = state.nodes().getDataNodes().values().toArray(DiscoveryNode[]::new); + final var statsRequest = new IndexModeStatsActionType.StatsRequest(nodes); + final int finalNumIndices = numIndices; + final int finalNumIndicesWithSyntheticSources = numIndicesWithSyntheticSources; + client.execute(IndexModeStatsActionType.TYPE, statsRequest, listener.map(statsResponse -> { + final var indexStats = statsResponse.stats().get(IndexMode.LOGSDB); + return new XPackUsageFeatureResponse( + new 
LogsDBFeatureSetUsage( + true, + enabled, + finalNumIndices, + finalNumIndicesWithSyntheticSources, + indexStats.numDocs(), + indexStats.numBytes(), + hasCustomCutoffDate ) ); - } + })); } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index 633dd6123c088..29b3a80ce2896 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexMode; @@ -30,12 +31,14 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_PATH; @@ -44,6 +47,7 @@ final class LogsdbIndexModeSettingsProvider implements IndexSettingProvider { private static final Logger LOGGER = LogManager.getLogger(LogsdbIndexModeSettingsProvider.class); private static final String LOGS_PATTERN = "logs-*-*"; + private static final Set MAPPING_INCLUDES = Set.of("_doc._source.*", "_doc.properties.host**", "_doc.subobjects"); private final SyntheticSourceLicenseService syntheticSourceLicenseService; private final SetOnce> mapperServiceFactory = new SetOnce<>(); @@ -232,6 +236,19 @@ MappingHints getMappingHints( // combinedTemplateMappings can be empty when creating a normal index that doesn't match any template and without mapping. if (combinedTemplateMappings == null || combinedTemplateMappings.isEmpty()) { combinedTemplateMappings = List.of(new CompressedXContent("{}")); + } else { + // Filter the mapping to contain only the part this index settings provider is interested in. + // This reduces the overhead of loading mappings, since mappings can be very large. + // The _doc._source.mode is needed to determine synthetic source usage. + // The _doc.properties.host* is needed to determine whether host.name field can be injected. + // The _doc.subobjects is needed to determine whether subobjects is enabled. + List filteredMappings = new ArrayList<>(combinedTemplateMappings.size()); + for (CompressedXContent mappingSource : combinedTemplateMappings) { + var ref = mappingSource.compressedReference(); + var map = XContentHelper.convertToMap(ref, true, XContentType.JSON, MAPPING_INCLUDES, Set.of()).v2(); + filteredMappings.add(new CompressedXContent(map)); + } + combinedTemplateMappings = filteredMappings; } mapperService.merge(MapperService.SINGLE_MAPPING_NAME, combinedTemplateMappings, MapperService.MergeReason.INDEX_TEMPLATE); Mapper hostName = mapperService.mappingLookup().getMapper("host.name"); @@ -251,7 +268,7 @@ MappingHints getMappingHints( // In case invalid mappings or setting are provided, then mapper service creation can fail. 
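The mapping-filtering loop added to LogsdbIndexModeSettingsProvider above parses only the fragments the provider actually inspects and discards the rest before the temporary MapperService merge, which keeps very large template mappings cheap to load. The core of that loop, restated on its own with the same types and calls the diff uses:

    Set<String> includes = Set.of("_doc._source.*", "_doc.properties.host**", "_doc.subobjects");
    List<CompressedXContent> filtered = new ArrayList<>(combinedTemplateMappings.size());
    for (CompressedXContent mappingSource : combinedTemplateMappings) {
        var ref = mappingSource.compressedReference();
        // Keep only the source mode, the host.* subtree, and the subobjects flag.
        var map = XContentHelper.convertToMap(ref, true, XContentType.JSON, includes, Set.of()).v2();
        filtered.add(new CompressedXContent(map));
    }
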
// In that case it is ok to return false here. The index creation will fail anyway later, so no need to fallback to stored // source. - LOGGER.info(() -> Strings.format("unable to create mapper service for index [%s]", indexName), e); + LOGGER.warn(() -> Strings.format("unable to create mapper service for index [%s]", indexName), e); return MappingHints.EMPTY; } } diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml index 1fd10c2028fd2..b49ada6e1809c 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml @@ -2,9 +2,6 @@ create logsdb data stream with host.name as keyword: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: cluster.put_component_template: @@ -38,9 +35,6 @@ create logsdb data stream with host.name as keyword: create logsdb data stream with host.name as keyword and timestamp as date: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: cluster.put_component_template: @@ -76,9 +70,6 @@ create logsdb data stream with host.name as keyword and timestamp as date: create logsdb data stream with host.name as integer and timestamp as date: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: cluster.put_component_template: @@ -107,16 +98,120 @@ create logsdb data stream with host.name as integer and timestamp as date: - do: indices.create_data_stream: name: "logsdb" + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: "logsdb" + expand_wildcards: hidden + - length: { data_streams: 1 } + - set: { data_streams.0.indices.0.index_name: backing_index } + + - do: + indices.get_settings: + index: $backing_index + - match: { .$backing_index.settings.index.mode: logsdb } + - is_false: .$backing_index.settings.index.logsdb.add_host_name_field + - match: { .$backing_index.settings.index.logsdb.sort_on_host_name: "true" } + +--- +create logsdb data stream with no host.name and timestamp as date: + - requires: + test_runner_features: [ "allowed_warnings" ] + + - do: + cluster.put_component_template: + name: "logsdb-mappings" + body: + template: + settings: + mode: "logsdb" + mappings: + properties: + "@timestamp": + type: "date" + - do: + indices.put_index_template: + name: "logsdb-index-template" + body: + index_patterns: ["logsdb"] + data_stream: {} + composed_of: ["logsdb-mappings"] + allowed_warnings: + - "index template [logsdb-index-template] has index patterns [logsdb] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logsdb-index-template] will take precedence during new index creation" + + - do: + indices.create_data_stream: + name: "logsdb" - is_true: acknowledged + - do: + indices.get_data_stream: + name: "logsdb" + expand_wildcards: hidden + - length: { data_streams: 1 } + - set: { data_streams.0.indices.0.index_name: backing_index } + + - do: + indices.get_settings: + index: 
$backing_index + - match: { .$backing_index.settings.index.mode: logsdb } + - match: { .$backing_index.settings.index.logsdb.add_host_name_field: "true" } + - match: { .$backing_index.settings.index.logsdb.sort_on_host_name: "true" } + --- -create logsdb data stream with host as keyword: +create logsdb data stream with host as keyword and timestamp as date: - requires: test_runner_features: [ "allowed_warnings" ] + + - do: + cluster.put_component_template: + name: "logsdb-mappings" + body: + template: + settings: + mode: "logsdb" + mappings: + properties: + host: + type: "keyword" + "@timestamp": + type: "date" + + - do: + indices.put_index_template: + name: "logsdb-index-template" + body: + index_patterns: ["logsdb"] + data_stream: {} + composed_of: ["logsdb-mappings"] + allowed_warnings: + - "index template [logsdb-index-template] has index patterns [logsdb] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logsdb-index-template] will take precedence during new index creation" + + - do: + indices.create_data_stream: + name: "logsdb" + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: "logsdb" + expand_wildcards: hidden + - length: { data_streams: 1 } + - set: { data_streams.0.indices.0.index_name: backing_index } + + - do: + indices.get_settings: + index: $backing_index + - match: { .$backing_index.settings.index.mode: logsdb } + - is_false: .$backing_index.settings.index.logsdb.add_host_name_field + - is_false: .$backing_index.settings.index.logsdb.sort_on_host_name + +--- +create logsdb data stream with host as keyword: - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields + test_runner_features: [ "allowed_warnings" ] - do: cluster.put_component_template: @@ -150,9 +245,6 @@ create logsdb data stream with host as keyword: create logsdb data stream with host as text and multi fields: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: cluster.put_component_template: @@ -193,9 +285,6 @@ create logsdb data stream with host as text and multi fields: create logsdb data stream with host as text: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: ["mapper.keyword_normalizer_synthetic_source"] - reason: "Support for normalizer on keyword fields" - do: cluster.put_component_template: @@ -232,9 +321,6 @@ create logsdb data stream with host as text: create logsdb data stream with host as text and name as double: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: ["mapper.keyword_normalizer_synthetic_source"] - reason: "Support for normalizer on keyword fields" - do: cluster.put_component_template: @@ -274,9 +360,6 @@ create logsdb data stream with host as text and name as double: create logsdb data stream with timestamp object mapping: - requires: test_runner_features: [ "allowed_warnings" ] - - requires: - cluster_features: ["mapper.keyword_normalizer_synthetic_source"] - reason: "Support for normalizer on keyword fields" - do: cluster.put_component_template: @@ -315,9 +398,6 @@ create logsdb data stream with timestamp object mapping: create logsdb data stream with custom sorting without host.name: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - 
reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -365,9 +445,6 @@ create logsdb data stream with custom sorting without host.name: create logsdb data stream with custom sorting and host object: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -424,9 +501,6 @@ create logsdb data stream with custom sorting and host object: create logsdb data stream with custom sorting and dynamically mapped host.name: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -489,9 +563,6 @@ create logsdb data stream with custom sorting and dynamically mapped host.name: create logsdb data stream with custom sorting and host.name object: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -545,9 +616,6 @@ create logsdb data stream with custom sorting and host.name object: create logsdb data stream with default sorting on malformed host.name: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -609,9 +677,6 @@ create logsdb data stream with default sorting on malformed host.name: create logsdb data stream with custom sorting and host.name date field: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -662,9 +727,6 @@ create logsdb data stream with custom sorting and host.name date field: create logsdb data stream with custom sorting and missing host.name field mapping: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -713,9 +775,6 @@ create logsdb data stream with custom sorting and missing host.name field mappin create logsdb data stream with custom sorting and host.name field without doc values: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -755,9 +814,6 @@ create logsdb data stream with custom sorting and host.name field without doc va create logsdb data stream with incompatible ignore_above on host.name: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -803,9 +859,6 @@ create logsdb data stream with incompatible ignore_above on host.name: create logsdb data stream with no sorting and host.name as text: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -846,9 +899,6 @@ create logsdb data stream with no sorting and host.name as text: create logsdb data stream without index sorting and ignore_above on host.name: - skip: features: [ 
"allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -892,9 +942,6 @@ create logsdb data stream without index sorting and ignore_above on host.name: create logsdb data stream with host.name as alias and sorting on it: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -930,9 +977,6 @@ create logsdb data stream with host.name as alias and sorting on it: create logsdb data stream with multi-fields on host.name: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -977,9 +1021,6 @@ create logsdb data stream with multi-fields on host.name: create logsdb data stream with multi-fields on host.name and no sorting: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -1021,9 +1062,6 @@ create logsdb data stream with multi-fields on host.name and no sorting: create logsdb data stream with custom empty sorting: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: @@ -1068,9 +1106,6 @@ create logsdb data stream with custom empty sorting: create logsdb data stream with custom sorting on timestamp: - skip: features: [ "allowed_warnings" ] - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: allowed_warnings: diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml index 22f69e30650fd..eb7c1df91de60 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/80_synthetic_source.yml @@ -5,10 +5,6 @@ setup: --- synthetic source: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires synthetic source support - - do: indices.create: index: synthetic_source_test @@ -51,10 +47,6 @@ synthetic source: --- synthetic source with copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: synthetic_source_test @@ -111,10 +103,6 @@ synthetic source with copy_to: --- synthetic source with disabled doc_values: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires disabled doc_values support in synthetic source - - do: indices.create: index: synthetic_source_test diff --git a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml index 3095a19fa29d0..0989a92f2ca01 100644 --- 
a/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml +++ b/x-pack/plugin/mapper-version/src/yamlRestTest/resources/rest-api-spec/test/40_synthetic_source.yml @@ -69,10 +69,6 @@ script values: --- synthetic source with copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: synthetic_source_test diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java index f1b14658c6b8e..34bc650caec58 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java @@ -54,7 +54,7 @@ public void testDestIndexCreated() throws Exception { ); try { - indicesAdmin().getIndex(new GetIndexRequest().indices(destIndex)).actionGet(); + indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(destIndex)).actionGet(); } catch (IndexNotFoundException e) { fail(); } diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java index b310d332fdd5c..cfd4f0901336d 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java @@ -424,7 +424,7 @@ public void testTsdbStartEndSet() throws Exception { backingIndexName = indexResponse.getIndex(); } - var sourceSettings = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndexName)) + var sourceSettings = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndexName)) .actionGet() .getSettings() .get(backingIndexName); @@ -446,7 +446,10 @@ public void testTsdbStartEndSet() throws Exception { .actionGet() .getDestIndex(); - var destSettings = indicesAdmin().getIndex(new GetIndexRequest().indices(destIndex)).actionGet().getSettings().get(destIndex); + var destSettings = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(destIndex)) + .actionGet() + .getSettings() + .get(destIndex); var destStart = IndexSettings.TIME_SERIES_START_TIME.get(destSettings); var destEnd = IndexSettings.TIME_SERIES_END_TIME.get(destSettings); @@ -488,38 +491,11 @@ private static String formatInstant(Instant instant) { } private static String getIndexUUID(String index) { - return indicesAdmin().getIndex(new GetIndexRequest().indices(index)) + return indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)) .actionGet() .getSettings() .get(index) .get(IndexMetadata.SETTING_INDEX_UUID); } - public void testGenerateDestIndexName_noDotPrefix() { - String sourceIndex = "sourceindex"; - String expectedDestIndex = "migrated-sourceindex"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, 
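The migrate IT edits above all apply one mechanical pattern: GetIndexRequest is a master-node request whose constructor now appears to require an explicit timeout, so every call site passes the test framework's TEST_REQUEST_TIMEOUT instead of using a no-arg constructor. The before/after shape at a single call site:

    // before: indicesAdmin().getIndex(new GetIndexRequest().indices(index))
    var settings = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index))
        .actionGet()
        .getSettings()
        .get(index);
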
actualDestIndex); - } - - public void testGenerateDestIndexName_withDotPrefix() { - String sourceIndex = ".sourceindex"; - String expectedDestIndex = ".migrated-sourceindex"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } - - public void testGenerateDestIndexName_withHyphen() { - String sourceIndex = "source-index"; - String expectedDestIndex = "migrated-source-index"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } - - public void testGenerateDestIndexName_withUnderscore() { - String sourceIndex = "source_index"; - String expectedDestIndex = "migrated-source_index"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java index 55ec4065be8c4..93b90e551e721 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -59,6 +59,7 @@ import java.util.function.Supplier; import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING; import static org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskExecutor.MAX_CONCURRENT_INDICES_REINDEXED_PER_DATA_STREAM_SETTING; public class MigratePlugin extends Plugin implements ActionPlugin, PersistentTaskPlugin { @@ -160,6 +161,7 @@ public List> getPersistentTasksExecutor( public List> getSettings() { List> pluginSettings = new ArrayList<>(); pluginSettings.add(MAX_CONCURRENT_INDICES_REINDEXED_PER_DATA_STREAM_SETTING); + pluginSettings.add(REINDEX_MAX_REQUESTS_PER_SECOND_SETTING); return pluginSettings; } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index 7bb440bc52a15..f1810d85ffd16 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -8,13 +8,17 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import 
org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; @@ -24,13 +28,16 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -46,8 +53,37 @@ public class ReindexDataStreamIndexTransportAction extends HandledTransportActio ReindexDataStreamIndexAction.Request, ReindexDataStreamIndexAction.Response> { + public static final String REINDEX_MAX_REQUESTS_PER_SECOND_KEY = "migrate.data_stream_reindex_max_request_per_second"; + + public static final Setting REINDEX_MAX_REQUESTS_PER_SECOND_SETTING = new Setting<>( + REINDEX_MAX_REQUESTS_PER_SECOND_KEY, + Float.toString(10f), + s -> { + if (s.equals("-1")) { + return Float.POSITIVE_INFINITY; + } else { + return Float.parseFloat(s); + } + }, + value -> { + if (value <= 0f) { + throw new IllegalArgumentException( + "Failed to parse value [" + + value + + "] for setting [" + + REINDEX_MAX_REQUESTS_PER_SECOND_KEY + + "] " + + "must be greater than 0 or -1 for infinite" + ); + } + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private static final Logger logger = LogManager.getLogger(ReindexDataStreamIndexTransportAction.class); private static final IndicesOptions IGNORE_MISSING_OPTIONS = IndicesOptions.fromOptions(true, true, false, false); + private final ClusterService clusterService; private final Client client; @@ -108,6 +144,7 @@ protected void doExecute( .andThen(l -> createIndex(sourceIndex, destIndexName, l, taskId)) .andThen(l -> reindex(sourceIndexName, destIndexName, l, taskId)) .andThen(l -> copyOldSourceSettingsToDest(settingsBefore, destIndexName, l, taskId)) + .andThen(l -> sanityCheck(sourceIndexName, destIndexName, l, taskId)) .andThenApply(ignored -> new ReindexDataStreamIndexAction.Response(destIndexName)) .addListener(listener); } @@ -176,7 +213,8 @@ private void createIndex( client.execute(CreateIndexFromSourceAction.INSTANCE, request, failIfNotAcknowledged(listener, errorMessage)); } - private void reindex(String sourceIndexName, String destIndexName, ActionListener listener, TaskId parentTaskId) { + // Visible for testing + void reindex(String sourceIndexName, String destIndexName, ActionListener listener, TaskId parentTaskId) { logger.debug("Reindex to destination index [{}] from source index [{}]", destIndexName, sourceIndexName); var reindexRequest = new ReindexRequest(); reindexRequest.setSourceIndices(sourceIndexName); @@ -184,25 +222,11 @@ private void reindex(String sourceIndexName, String destIndexName, ActionListene reindexRequest.getSearchRequest().source().fetchSource(true); reindexRequest.setDestIndex(destIndexName); reindexRequest.setParentTask(parentTaskId); + 
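REINDEX_MAX_REQUESTS_PER_SECOND_SETTING above separates parsing from validation: the parser maps the literal "-1" to Float.POSITIVE_INFINITY (the reindex API's encoding for "unthrottled"), and the validator then rejects any non-positive parsed value, so infinity passes while zero and negative rates fail. The same two-stage shape, reduced to essentials (the setting key and default below are illustrative):

    public static final Setting<Float> RATE_LIMIT_SETTING = new Setting<>(
        "example.max_requests_per_second",                                    // illustrative key
        Float.toString(10f),                                                  // default throttle
        s -> s.equals("-1") ? Float.POSITIVE_INFINITY : Float.parseFloat(s),  // parse: -1 means unlimited
        value -> {
            if (value <= 0f) {                                                // POSITIVE_INFINITY passes this check
                throw new IllegalArgumentException("must be greater than 0, or -1 for unlimited");
            }
        },
        Setting.Property.Dynamic,
        Setting.Property.NodeScope
    );
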
reindexRequest.setRequestsPerSecond(clusterService.getClusterSettings().get(REINDEX_MAX_REQUESTS_PER_SECOND_SETTING)); reindexRequest.setSlices(0); // equivalent to slices=auto in rest api client.execute(ReindexAction.INSTANCE, reindexRequest, listener); } - private void addBlockIfFromSource( - IndexMetadata.APIBlock block, - Settings settingsBefore, - String destIndexName, - ActionListener listener, - TaskId parentTaskId - ) { - if (settingsBefore.getAsBoolean(block.settingName(), false)) { - var errorMessage = String.format(Locale.ROOT, "Add [%s] block to index [%s] was not acknowledged", block.name(), destIndexName); - addBlockToIndex(block, destIndexName, failIfNotAcknowledged(listener, errorMessage), parentTaskId); - } else { - listener.onResponse(null); - } - } - private void updateSettings( String index, Settings.Builder settings, @@ -270,4 +294,50 @@ private void addBlockToIndex( addIndexBlockRequest.setParentTask(parentTaskId); client.admin().indices().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest, listener); } + + private void getIndexDocCount(String index, TaskId parentTaskId, ActionListener listener) { + SearchRequest countRequest = new SearchRequest(index); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).trackTotalHits(true); + countRequest.allowPartialSearchResults(false); + countRequest.source(searchSourceBuilder); + countRequest.setParentTask(parentTaskId); + client.search(countRequest, listener.delegateFailure((delegate, response) -> { + var totalHits = response.getHits().getTotalHits(); + assert totalHits.relation() == TotalHits.Relation.EQUAL_TO; + delegate.onResponse(totalHits.value()); + })); + } + + private void sanityCheck( + String sourceIndexName, + String destIndexName, + ActionListener listener, + TaskId parentTaskId + ) { + if (Assertions.ENABLED) { + logger.debug("Comparing source [{}] and dest [{}] doc counts", sourceIndexName, destIndexName); + client.execute( + RefreshAction.INSTANCE, + new RefreshRequest(destIndexName), + listener.delegateFailureAndWrap((delegate, ignored) -> { + getIndexDocCount(sourceIndexName, parentTaskId, delegate.delegateFailureAndWrap((delegate1, sourceCount) -> { + getIndexDocCount(destIndexName, parentTaskId, delegate1.delegateFailureAndWrap((delegate2, destCount) -> { + assert sourceCount == destCount + : String.format( + Locale.ROOT, + "source index [%s] has %d docs and dest [%s] has %d docs", + sourceIndexName, + sourceCount, + destIndexName, + destCount + ); + delegate2.onResponse(null); + })); + })); + }) + ); + } else { + listener.onResponse(null); + } + } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index 104f843cc7950..ab86d957c39db 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -11,11 +11,14 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import 
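The sanityCheck step above runs only when JVM assertions are enabled (Assertions.ENABLED), so production reindexes skip it entirely; when active, it refreshes the destination index and then compares doc counts on both sides. The counting idiom it relies on, in isolation (mirrors getIndexDocCount from the diff):

    SearchRequest countRequest = new SearchRequest(index);
    countRequest.allowPartialSearchResults(false);  // a partial count would make the comparison meaningless
    countRequest.source(new SearchSourceBuilder().size(0).trackTotalHits(true));  // exact total, no hits fetched
    countRequest.setParentTask(parentTaskId);
    client.search(countRequest, listener.delegateFailure((delegate, response) -> {
        var totalHits = response.getHits().getTotalHits();
        assert totalHits.relation() == TotalHits.Relation.EQUAL_TO;  // guaranteed by trackTotalHits(true)
        delegate.onResponse(totalHits.value());
    }));
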
org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; import org.elasticsearch.action.support.CountDownActionListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.DataStream; @@ -211,26 +214,29 @@ private void maybeProcessNextIndex( reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); ReindexDataStreamIndexAction.Request reindexDataStreamIndexRequest = new ReindexDataStreamIndexAction.Request(index.getName()); reindexDataStreamIndexRequest.setParentTask(parentTaskId); - reindexClient.execute(ReindexDataStreamIndexAction.INSTANCE, reindexDataStreamIndexRequest, ActionListener.wrap(response1 -> { - updateDataStream(sourceDataStream, index.getName(), response1.getDestIndex(), ActionListener.wrap(unused -> { + + SubscribableListener.newForked( + l -> reindexClient.execute(ReindexDataStreamIndexAction.INSTANCE, reindexDataStreamIndexRequest, l) + ) + .andThen( + (l, result) -> updateDataStream(sourceDataStream, index.getName(), result.getDestIndex(), l, reindexClient, parentTaskId) + ) + .andThen(l -> deleteIndex(index.getName(), reindexClient, parentTaskId, l)) + .addListener(ActionListener.wrap(unused -> { reindexDataStreamTask.reindexSucceeded(index.getName()); listener.onResponse(null); maybeProcessNextIndex(indicesRemaining, reindexDataStreamTask, reindexClient, sourceDataStream, listener, parentTaskId); - }, exception -> { - reindexDataStreamTask.reindexFailed(index.getName(), exception); + }, e -> { + reindexDataStreamTask.reindexFailed(index.getName(), e); listener.onResponse(null); - }), reindexClient, parentTaskId); - }, exception -> { - reindexDataStreamTask.reindexFailed(index.getName(), exception); - listener.onResponse(null); - })); + })); } private void updateDataStream( String dataStream, String oldIndex, String newIndex, - ActionListener listener, + ActionListener listener, ExecuteWithHeadersClient reindexClient, TaskId parentTaskId ) { @@ -240,17 +246,18 @@ private void updateDataStream( List.of(DataStreamAction.removeBackingIndex(dataStream, oldIndex), DataStreamAction.addBackingIndex(dataStream, newIndex)) ); modifyDataStreamRequest.setParentTask(parentTaskId); - reindexClient.execute(ModifyDataStreamsAction.INSTANCE, modifyDataStreamRequest, new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse response) { - listener.onResponse(null); - } + reindexClient.execute(ModifyDataStreamsAction.INSTANCE, modifyDataStreamRequest, listener); + } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + private void deleteIndex( + String indexName, + ExecuteWithHeadersClient reindexClient, + TaskId parentTaskId, + ActionListener listener + ) { + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName); + deleteIndexRequest.setParentTask(parentTaskId); + reindexClient.execute(TransportDeleteIndexAction.TYPE, deleteIndexRequest, listener); } private void completeSuccessfulPersistentTask( diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java new file mode 100644 index 
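The executor rewrite above does two things at once: the nested ActionListener.wrap callbacks become a linear SubscribableListener chain, and a third step is appended that deletes the old backing index once the data stream has been swapped over to the migrated one. The pipeline shape, with explicit type witnesses added for readability (the local names here are stand-ins for the fields in the diff):

    SubscribableListener
        .<ReindexDataStreamIndexAction.Response>newForked(
            l -> reindexClient.execute(ReindexDataStreamIndexAction.INSTANCE, reindexRequest, l)
        )
        // swap the data stream's backing index from the old index to the migrated one
        .<AcknowledgedResponse>andThen(
            (l, result) -> updateDataStream(dataStream, oldIndexName, result.getDestIndex(), l, reindexClient, parentTaskId)
        )
        // the old index is no longer referenced by the data stream, so drop it
        .<AcknowledgedResponse>andThen(l -> deleteIndex(oldIndexName, reindexClient, parentTaskId, l))
        .addListener(ActionListener.wrap(unused -> task.reindexSucceeded(oldIndexName), e -> task.reindexFailed(oldIndexName, e)));
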
0000000000000..99e1031dec3a2 --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; +import org.mockito.Answers; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import java.util.Collections; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +public class ReindexDataStreamIndexTransportActionTests extends ESTestCase { + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private TransportService transportService; + @Mock + private ClusterService clusterService; + @Mock + private ActionFilters actionFilters; + @Mock + private Client client; + + @InjectMocks + private ReindexDataStreamIndexTransportAction action; + + @Captor + private ArgumentCaptor request; + + private AutoCloseable mocks; + + @Before + public void setUp() throws Exception { + super.setUp(); + mocks = MockitoAnnotations.openMocks(this); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + mocks.close(); + } + + public void testGenerateDestIndexName_noDotPrefix() { + String sourceIndex = "sourceindex"; + String expectedDestIndex = "migrated-sourceindex"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testGenerateDestIndexName_withDotPrefix() { + String sourceIndex = ".sourceindex"; + String expectedDestIndex = ".migrated-sourceindex"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testGenerateDestIndexName_withHyphen() { + String sourceIndex = "source-index"; + String expectedDestIndex = "migrated-source-index"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testGenerateDestIndexName_withUnderscore() { + String sourceIndex = "source_index"; + String expectedDestIndex = "migrated-source_index"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testReindexIncludesRateLimit() { + var targetRateLimit = randomFloatBetween(1, 100, 
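[editor's note] The new unit test wires its fixtures with Mockito annotations rather than a full node. As a generic, standalone sketch (the classes below are illustrative, not the ones under test): openMocks(this) materializes the @Mock, @Captor, and @InjectMocks fields and hands back an AutoCloseable that must be closed in tearDown so mock state does not leak between tests.

```java
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

class Collaborator { String answer(String q) { return q; } }

class Subject {
    private final Collaborator collaborator;
    Subject(Collaborator collaborator) { this.collaborator = collaborator; }
    String ask(String q) { return collaborator.answer(q); }
}

public class MockLifecycleSketch {
    @Mock private Collaborator collaborator;               // created by openMocks
    @InjectMocks private Subject subject;                  // constructor-injected with the mock above
    @Captor private ArgumentCaptor<String> questionCaptor; // captures arguments for later assertions

    private AutoCloseable mocks;

    void setUp() { mocks = MockitoAnnotations.openMocks(this); }

    void tearDown() throws Exception {
        mocks.close(); // releases the inline-mock machinery registered by openMocks
    }
}
```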
true); + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), targetRateLimit) + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + + doNothing().when(client).execute(eq(ReindexAction.INSTANCE), request.capture(), eq(listener)); + + action.reindex(sourceIndex, destIndex, listener, taskId); + + ReindexRequest requestValue = request.getValue(); + + assertEquals(targetRateLimit, requestValue.getRequestsPerSecond(), 0.0); + } + + public void testReindexIncludesInfiniteRateLimit() { + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), "-1") + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + doNothing().when(client).execute(eq(ReindexAction.INSTANCE), request.capture(), eq(listener)); + + action.reindex(sourceIndex, destIndex, listener, taskId); + + ReindexRequest requestValue = request.getValue(); + + assertEquals(Float.POSITIVE_INFINITY, requestValue.getRequestsPerSecond(), 0.0); + } + + public void testReindexZeroRateLimitThrowsError() { + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), "0") + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.reindex(sourceIndex, destIndex, listener, taskId) + ); + assertEquals( + "Failed to parse value [0.0] for setting [migrate.data_stream_reindex_max_request_per_second]" + + " must be greater than 0 or -1 for infinite", + e.getMessage() + ); + } + + public void testReindexNegativeRateLimitThrowsError() { + float targetRateLimit = randomFloatBetween(-100, -1, true); + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), targetRateLimit) + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> 
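[editor's note] The three rate-limit tests pin down the setting's contract: any positive float is accepted, -1 maps to an unthrottled reindex, and zero or other negatives are rejected at parse time. The setting's definition is not part of this diff, but a hypothetical definition consistent with the asserted error messages would look roughly like this (the default value and property flags are assumptions):

```java
import org.elasticsearch.common.settings.Setting;

// Hypothetical sketch, not the plugin's actual definition.
static final Setting<Float> REINDEX_MAX_REQUESTS_PER_SECOND_SETTING = new Setting<>(
    "migrate.data_stream_reindex_max_request_per_second",
    "-1", // assumed default: unthrottled
    s -> {
        float value = Float.parseFloat(s);
        if (value == -1f) {
            return Float.POSITIVE_INFINITY; // REST convention: -1 means no throttling
        }
        if (value <= 0f) {
            // Matches the messages asserted in the zero/negative tests.
            throw new IllegalArgumentException(
                "Failed to parse value ["
                    + value
                    + "] for setting [migrate.data_stream_reindex_max_request_per_second]"
                    + " must be greater than 0 or -1 for infinite"
            );
        }
        return value;
    },
    Setting.Property.NodeScope,
    Setting.Property.Dynamic
);
```

Note how a parser shaped like this also explains the "[0.0]" in the zero test's expected message: the raw string "0" is parsed to a float before being echoed back.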
action.reindex(sourceIndex, destIndex, listener, taskId) + ); + assertEquals( + "Failed to parse value [" + + targetRateLimit + + "] for setting [migrate.data_stream_reindex_max_request_per_second]" + + " must be greater than 0 or -1 for infinite", + e.getMessage() + ); + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java index 367c1cee8b0ee..caba356f82ee2 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java @@ -83,9 +83,12 @@ public void cleanup() { updateClusterSettings(Settings.builder().putNull("logger.org.elasticsearch.xpack.ml.datafeed")); cleanUp(); // Race conditions between closing and killing tasks in these tests, - // sometimes result in lingering persistent tasks (such as "_close"), - // which cause subsequent tests to fail. - client().execute(TransportCancelTasksAction.TYPE, new CancelTasksRequest()); + // sometimes result in lingering persistent close tasks, which cause + // subsequent tests to fail. Therefore, they're explicitly cancelled. + CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); + cancelTasksRequest.setActions("*close*"); + cancelTasksRequest.setWaitForCompletion(true); + client().execute(TransportCancelTasksAction.TYPE, cancelTasksRequest).actionGet(); } public void testLookbackOnly() throws Exception { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index 39519dc7931d0..50b9597bb0326 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -133,7 +133,7 @@ public void testDeleteExpiredDataActionDeletesEmptyStateIndices() throws Excepti client().admin().indices().prepareCreate(".ml-state-000007").addAlias(new Alias(".ml-state-write").isHidden(true)).get(); refresh(); - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(".ml-state*").get(); + GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(".ml-state*").get(); assertThat( Strings.toString(getIndexResponse), getIndexResponse.getIndices(), @@ -143,7 +143,7 @@ public void testDeleteExpiredDataActionDeletesEmptyStateIndices() throws Excepti client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); refresh(); - getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(".ml-state*").get(); + getIndexResponse = client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(".ml-state*").get(); assertThat( Strings.toString(getIndexResponse), getIndexResponse.getIndices(), diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java 
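[editor's note] The DatafeedJobsIT cleanup above narrows task cancellation from everything to close-related tasks and, crucially, waits for the cancellation to finish before the next test starts. In isolation (client() is the usual test-base helper, and this would sit in an @After method), the idiom is:

```java
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;

void cancelLingeringCloseTasks() {
    CancelTasksRequest request = new CancelTasksRequest();
    request.setActions("*close*");        // wildcard match on task action names
    request.setWaitForCompletion(true);   // block until the matched tasks are actually gone
    client().execute(TransportCancelTasksAction.TYPE, request).actionGet();
}
```

Without setWaitForCompletion(true), the execute call returns as soon as cancellation is requested, which leaves exactly the kind of lingering task the comment in the hunk describes.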
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java index e8acc37e0e153..c90c461fe8b1a 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java @@ -404,7 +404,7 @@ protected static void assertModelStatePersisted(String stateDocId) { } protected static void assertMlResultsFieldMappings(String index, String predictedClassField, String expectedType) { - Map mappings = client().execute(GetIndexAction.INSTANCE, new GetIndexRequest().indices(index)) + Map mappings = client().execute(GetIndexAction.INSTANCE, new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)) .actionGet() .mappings() .get(index) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java index d179a28aa9890..57fbf44f2565c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java @@ -129,7 +129,9 @@ public void testMLFeatureReset() throws Exception { ); client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { - List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); + List indices = Arrays.asList( + client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".ml*").get().indices() + ); assertThat(indices.toString(), indices, is(empty())); }); assertThat(isResetMode(), is(false)); @@ -160,7 +162,9 @@ public void testMLFeatureResetWithModelDeployment() throws Exception { createModelDeployment(); client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { - List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); + List indices = Arrays.asList( + client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".ml*").get().indices() + ); assertThat(indices.toString(), indices, is(empty())); }); assertThat(isResetMode(), is(false)); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java index 6f5fa72a2b2f6..b163036e94760 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java @@ -321,7 +321,7 @@ public void testNotCreatedWhenAfterOtherMlIndexAndResetInProgress() throws Excep } private boolean annotationsIndexExists(String expectedName) { - GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex() + GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) 
.setIndices(AnnotationIndex.LATEST_INDEX_NAME) .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) .get(); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java index ec95b979fdd6d..40d9d97eddd18 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java @@ -60,7 +60,12 @@ public void testCrudOnTwoJobsInSharedIndex() throws Exception { OriginSettingClient client = new OriginSettingClient(client(), ML_ORIGIN); assertThat( - client.admin().indices().prepareGetIndex().addIndices(AnomalyDetectorsIndex.jobStateIndexPattern()).get().indices(), + client.admin() + .indices() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(AnomalyDetectorsIndex.jobStateIndexPattern()) + .get() + .indices(), arrayContaining(".ml-state-000001") ); assertThat( @@ -77,7 +82,7 @@ public void testCrudOnTwoJobsInSharedIndex() throws Exception { assertThat( client.admin() .indices() - .prepareGetIndex() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) .addIndices(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)) .get() .indices().length, @@ -86,7 +91,7 @@ public void testCrudOnTwoJobsInSharedIndex() throws Exception { assertThat( client.admin() .indices() - .prepareGetIndex() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) .addIndices(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2)) .get() .indices().length, @@ -137,7 +142,12 @@ public void testForceCloseDoesNotCreateState() throws Exception { OriginSettingClient client = new OriginSettingClient(client(), ML_ORIGIN); assertThat( - client.admin().indices().prepareGetIndex().addIndices(AnomalyDetectorsIndex.jobStateIndexPattern()).get().indices(), + client.admin() + .indices() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(AnomalyDetectorsIndex.jobStateIndexPattern()) + .get() + .indices(), arrayContaining(".ml-state-000001") ); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java index 4493a680d25cf..46ffa649e18c1 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java @@ -195,7 +195,7 @@ public void testDeleteDedicatedJobWithDataInShared() throws Exception { // Make sure dedicated index is gone assertThat( - indicesAdmin().prepareGetIndex() + indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT) .setIndices(dedicatedIndex) .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) .get() diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java index 3a08b56ed38a4..00f4748cdf97b 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java @@ -97,7 +97,12 @@ public void testPutTrainedModelAndDefinition() 
{ ).actionGet(); assertThat( - client().admin().indices().prepareGetIndex().addIndices(InferenceIndexConstants.nativeDefinitionStore()).get().indices().length, + client().admin() + .indices() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) + .addIndices(InferenceIndexConstants.nativeDefinitionStore()) + .get() + .indices().length, equalTo(1) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java index 16a0f85028b85..f2bb12b13e30f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java @@ -381,6 +381,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java index 2a6d6eb329503..83212578982a7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask; import org.elasticsearch.xpack.ml.dataframe.DestinationIndex; import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; @@ -225,7 +226,7 @@ protected void doExecute(ActionListener listener) { ML_ORIGIN, parentTaskClient, GetIndexAction.INSTANCE, - new GetIndexRequest().indices(config.getDest().getIndex()), + new GetIndexRequest(MachineLearning.HARD_CODED_MACHINE_LEARNING_MASTER_NODE_TIMEOUT).indices(config.getDest().getIndex()), destIndexListener ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java index 337b4c397493e..2ee493630f0ef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.ml.MachineLearning; import java.util.Objects; import java.util.Set; @@ -82,7 +83,9 @@ private void getEmptyStateIndices(ActionListener> listener) { } private void getCurrentStateIndices(ActionListener> listener) { - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(AnomalyDetectorsIndex.jobStateIndexWriteAlias()); + GetIndexRequest getIndexRequest = new GetIndexRequest(MachineLearning.HARD_CODED_MACHINE_LEARNING_MASTER_NODE_TIMEOUT).indices( + AnomalyDetectorsIndex.jobStateIndexWriteAlias() + ); 
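[editor's note] Both the test migrations (prepareGetIndex(TEST_REQUEST_TIMEOUT)) and the production call site above now pass the master-node timeout explicitly when building a GetIndexRequest. The shape of the migrated call, with an arbitrary timeout standing in for the diff's constants:

```java
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.core.TimeValue;

GetIndexRequest stateIndicesRequest() {
    // The timeout bounds how long the request may wait on the master node;
    // 30s here is illustrative, not one of the constants used in the diff.
    return new GetIndexRequest(TimeValue.timeValueSeconds(30)).indices(".ml-state*");
}
```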
getIndexRequest.setParentTask(parentTaskId); client.admin() .indices() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java index bab012afc3101..ff1a1d19779df 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.monitoring.Monitoring; import org.elasticsearch.xpack.security.Security; @@ -86,6 +87,12 @@ protected XPackLicenseState getLicenseState() { } }); plugins.add(new MockedRollupPlugin()); + plugins.add(new InferencePlugin(settings) { + @Override + protected SSLService getSslService() { + return thisVar.getSslService(); + } + }); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index aeebfabdce704..5cf15454e47f2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -82,7 +82,6 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import org.elasticsearch.xpack.ilm.IndexLifecycle; -import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -161,8 +160,7 @@ protected Collection> nodePlugins() { DataStreamsPlugin.class, // To remove errors from parsing build in templates that contain scaled_float MapperExtrasPlugin.class, - Wildcard.class, - InferencePlugin.class + Wildcard.class ); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsActionTests.java index 8935b9f450c1f..fddcee25d636d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsActionTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsActionTests.java @@ -527,7 +527,7 @@ private void checkMonitoringTemplates() { private void assertWatchesExist(boolean exist) { // Check if watches index exists - if (client().admin().indices().prepareGetIndex().addIndices(".watches").get().getIndices().length == 0) { + if (client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".watches").get().getIndices().length == 0) { fail("Expected [.watches] index with cluster alerts present, but no [.watches] index was found"); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java index 4259fb5328435..f8ac5f9032fee 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java @@ -257,7 +257,7 @@ private void assertTemplatesExist() { private void assertWatchesExist() { // Check if watches index exists - if (client().admin().indices().prepareGetIndex().addIndices(".watches").get().getIndices().length == 0) { + if (client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".watches").get().getIndices().length == 0) { fail("Expected [.watches] index with cluster alerts present, but no [.watches] index was found"); } @@ -284,7 +284,7 @@ private void assertWatchesExist() { private void assertNoWatchesExist() { // Check if watches index exists - if (client().admin().indices().prepareGetIndex().addIndices(".watches").get().getIndices().length == 0) { + if (client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".watches").get().getIndices().length == 0) { fail("Expected [.watches] index with cluster alerts present, but no [.watches] index was found"); } diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml index 1aafe3765813b..c8330a44cd7ce 100644 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml @@ -148,9 +148,6 @@ setup: - match: { .$idx0name.mappings.properties.attributes.properties.foo.type: "keyword" } --- IP dimensions: - - requires: - cluster_features: ["routing.multi_value_routing_path"] - reason: support for multi-value dimensions - do: bulk: index: metrics-generic.otel-default diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index b50ca4d928040..e910945830836 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -263,7 +263,7 @@ private void resolveIndices( ) { if (retrieveIndices || retrieveFrozenIndices) { if (clusterIsLocal(clusterWildcard)) { // resolve local indices - GetIndexRequest indexRequest = new GetIndexRequest().local(true) + GetIndexRequest indexRequest = new GetIndexRequest(MasterNodeRequest.INFINITE_MASTER_NODE_TIMEOUT).local(true) .indices(indexWildcards) .features(Feature.SETTINGS) .includeDefaults(false) diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java index bb61fa951948d..494eaa508c14a 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java @@ -13,18 +13,12 @@ import java.util.Set; import static org.elasticsearch.search.retriever.CompoundRetrieverBuilder.INNER_RETRIEVERS_FILTER_SUPPORT; -import static 
org.elasticsearch.xpack.rank.rrf.RRFRetrieverBuilder.RRF_RETRIEVER_COMPOSITION_SUPPORTED; /** * A set of features specifically for the rrf plugin. */ public class RRFFeatures implements FeatureSpecification { - @Override - public Set getFeatures() { - return Set.of(RRFRetrieverBuilder.RRF_RETRIEVER_SUPPORTED, RRF_RETRIEVER_COMPOSITION_SUPPORTED); - } - @Override public Set getTestFeatures() { return Set.of(INNER_RETRIEVERS_FILTER_SUPPORT); diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java index b5bca57478684..91bc19a3e0903 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java @@ -194,9 +194,6 @@ public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorCo @Override public RetrieverBuilder toRetriever(SearchSourceBuilder source, Predicate clusterSupportsFeature) { - if (false == clusterSupportsFeature.test(RRFRetrieverBuilder.RRF_RETRIEVER_COMPOSITION_SUPPORTED)) { - return null; - } int totalQueries = source.subSearches().size() + source.knnSearch().size(); if (totalQueries < 2) { throw new IllegalArgumentException("[rrf] requires at least 2 sub-queries to be defined"); diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index 186febfda18f3..a749a7c402c30 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -8,9 +8,7 @@ package org.elasticsearch.xpack.rank.rrf; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.rank.RankBuilder; @@ -44,8 +42,6 @@ public final class RRFRetrieverBuilder extends CompoundRetrieverBuilder { public static final String NAME = "rrf"; - public static final NodeFeature RRF_RETRIEVER_SUPPORTED = new NodeFeature("rrf_retriever_supported", true); - public static final NodeFeature RRF_RETRIEVER_COMPOSITION_SUPPORTED = new NodeFeature("rrf_retriever_composition_supported", true); public static final ParseField RETRIEVERS_FIELD = new ParseField("retrievers"); public static final ParseField RANK_CONSTANT_FIELD = new ParseField("rank_constant"); @@ -79,12 +75,6 @@ public final class RRFRetrieverBuilder extends CompoundRetrieverBuilder ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED) - ); - assertEquals("unknown retriever [rrf]", iae.getMessage()); - } - } - /** Tests extraction errors related to compound retrievers. These tests require a compound retriever which is why they are here. 
*/ public void testRetrieverExtractionErrors() throws IOException { try ( diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml index 258ab70cd09bd..ac328967d9fcf 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: 'rrf_retriever_supported' - reason: 'test requires rrf retriever implementation' - - do: indices.create: index: test diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/350_rrf_retriever_pagination.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/350_rrf_retriever_pagination.yml index d5d7a5de1dc71..e7a4bf8a5b34e 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/350_rrf_retriever_pagination.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/350_rrf_retriever_pagination.yml @@ -4,10 +4,6 @@ setup: - close_to - contains - - requires: - cluster_features: 'rrf_retriever_composition_supported' - reason: 'test requires rrf retriever composition support' - - do: indices.create: index: test diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml index bbc1087b05cc3..4dea4c3568739 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml @@ -2,10 +2,6 @@ setup: - skip: features: close_to - - requires: - cluster_features: 'rrf_retriever_supported' - reason: 'test requires rrf retriever implementation' - - do: indices.create: index: test diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/600_rrf_retriever_profile.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/600_rrf_retriever_profile.yml index 24259e3aa2a85..b1c5ec1664c8f 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/600_rrf_retriever_profile.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/600_rrf_retriever_profile.yml @@ -1,7 +1,5 @@ setup: - requires: - cluster_features: 'rrf_retriever_composition_supported' - reason: 'test requires rrf retriever composition support' test_runner_features: close_to - do: diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml index cb30542d80003..01d645fbfb4f5 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml @@ -1,9 +1,6 @@ setup: - skip: features: close_to - - requires: - cluster_features: 'rrf_retriever_composition_supported' - reason: 'test requires rrf retriever composition support' - do: indices.create: diff --git 
a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/800_rrf_with_text_similarity_reranker_retriever.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/800_rrf_with_text_similarity_reranker_retriever.yml index d9db1fe387625..98c4ae9e642d1 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/800_rrf_with_text_similarity_reranker_retriever.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/800_rrf_with_text_similarity_reranker_retriever.yml @@ -5,8 +5,6 @@ setup: - contains - requires: - cluster_features: ['rrf_retriever_composition_supported', 'text_similarity_reranker_retriever_supported'] - reason: need to have support for rrf and semantic reranking composition test_runner_features: "close_to" - do: diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java index c1c40acbd43c5..0eb2f827b338f 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java @@ -153,7 +153,7 @@ public void testBlobStoreCache() throws Exception { expectThrows( IndexNotFoundException.class, ".snapshot-blob-cache system index should not be created yet", - () -> systemClient().admin().indices().prepareGetIndex().addIndices(SNAPSHOT_BLOB_CACHE_INDEX).get() + () -> systemClient().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(SNAPSHOT_BLOB_CACHE_INDEX).get() ); final Storage storage1 = randomFrom(Storage.values()); diff --git a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java index ab5be0f48f5f3..057ebdece5c61 100644 --- a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java +++ b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.frozen.FrozenIndices; import org.elasticsearch.xpack.graph.Graph; import org.elasticsearch.xpack.ilm.IndexLifecycle; -import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.profiling.ProfilingPlugin; import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.search.AsyncSearch; @@ -89,7 +88,6 @@ protected Collection> getPlugins() { FrozenIndices.class, Graph.class, IndexLifecycle.class, - InferencePlugin.class, IngestCommonPlugin.class, IngestTestPlugin.class, MustachePlugin.class, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java index f7361480f0a51..f051289d6d7cf 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java @@ -321,25 +321,25 @@ public void testGetIndexMappingsIsFiltered() { { GetIndexResponse getIndexResponse = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).admin().indices().prepareGetIndex().setIndices("test").get(); + ).admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertExpectedMetadataFields(getIndexResponse.getMappings(), "field1"); } { GetIndexResponse getIndexResponse = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)) - ).admin().indices().prepareGetIndex().setIndices("test").get(); + ).admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertExpectedMetadataFields(getIndexResponse.getMappings(), "field2"); } { GetIndexResponse getIndexResponse = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD)) - ).admin().indices().prepareGetIndex().setIndices("test").get(); + ).admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertExpectedMetadataFields(getIndexResponse.getMappings(), "field1"); } { GetIndexResponse getIndexResponse = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD)) - ).admin().indices().prepareGetIndex().setIndices("test").get(); + ).admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertExpectedMetadataFields(getIndexResponse.getMappings(), "field1", "field2"); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java index b501ba69d673b..7f262817b9410 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java @@ -149,12 +149,12 @@ public void testGetIndex() throws Exception { final String field = "foo"; indexRandom(true, prepareIndex(index).setSource(field, "bar")); - GetIndexResponse response = indicesAdmin().prepareGetIndex().setIndices(index).get(); + GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get(); assertThat(response.getIndices(), arrayContaining(index)); response = client().filterWithHeader( singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD)) - ).admin().indices().prepareGetIndex().setIndices(index).get(); + ).admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get(); assertThat(response.getIndices(), arrayContaining(index)); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java index 846314534af72..0a0e6731c90d0 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java @@ -29,7 +29,7 @@ public void testDeleteIndexDestructiveOperationsRequireName() { () -> indicesAdmin().prepareDelete("*").get() ); assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); - String[] indices = indicesAdmin().prepareGetIndex().setIndices("index1").get().getIndices(); + String[] indices = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("index1").get().getIndices(); assertEquals(1, indices.length); assertEquals("index1", indices[0]); } @@ -39,7 +39,7 @@ public void testDeleteIndexDestructiveOperationsRequireName() { () -> indicesAdmin().prepareDelete("*", "-index1").get() ); assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); - String[] indices = indicesAdmin().prepareGetIndex().setIndices("index1").get().getIndices(); + String[] indices = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("index1").get().getIndices(); assertEquals(1, indices.length); assertEquals("index1", indices[0]); } @@ -49,7 +49,7 @@ public void testDeleteIndexDestructiveOperationsRequireName() { () -> indicesAdmin().prepareDelete("_all").get() ); assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); - String[] indices = indicesAdmin().prepareGetIndex().setIndices("index1").get().getIndices(); + String[] indices = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices("index1").get().getIndices(); assertEquals(1, indices.length); assertEquals("index1", indices[0]); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java index 4d3fa73c8e248..8148c5021e9b9 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java @@ -100,7 +100,7 @@ public void testAutoconfigFailedPasswordPromotion() { assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); // delete the security index, if it exist - GetIndexRequest getIndexRequest = new GetIndexRequest(); + GetIndexRequest getIndexRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT); getIndexRequest.indices(SECURITY_MAIN_ALIAS); getIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); GetIndexResponse getIndexResponse = client().admin().indices().getIndex(getIndexRequest).actionGet(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java index 3c639471f80b5..eea5466f23800 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java +++ 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java @@ -77,7 +77,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { // view all indices, including restricted ones final GetIndexResponse getIndexResponse = client.admin() .indices() - .prepareGetIndex() + .prepareGetIndex(TEST_REQUEST_TIMEOUT) .setIndices(randomFrom("_all", "*")) .setIndicesOptions(IndicesOptions.strictExpandHidden()) .get(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index 3b55295c1efce..65f6f4f1a5b0a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -872,7 +872,7 @@ private Set extractUsernames(SuggestProfilesResponse.ProfileHit[] profil } private GetIndexResponse getProfileIndexResponse() { - final GetIndexRequest getIndexRequest = new GetIndexRequest(); + final GetIndexRequest getIndexRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT); getIndexRequest.indices(".*"); return client().execute(GetIndexAction.INSTANCE, getIndexRequest).actionGet(); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java index 44cbf03f220a1..4d66be350a29c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java @@ -274,7 +274,7 @@ public void testOnIndexAvailableForSearchIndexWaitTimeOut() { public void testSecurityIndexSettingsCannotBeChanged() throws Exception { // make sure the security index is not auto-created - GetIndexRequest getIndexRequest = new GetIndexRequest(); + GetIndexRequest getIndexRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT); getIndexRequest.indices(SECURITY_MAIN_ALIAS); getIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); GetIndexResponse getIndexResponse = client().admin().indices().getIndex(getIndexRequest).actionGet(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java index 84749d895a44e..409ab62ae3e70 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java @@ -13,19 +13,11 @@ import java.util.Set; import static org.elasticsearch.xpack.security.support.QueryableBuiltInRolesSynchronizer.QUERYABLE_BUILT_IN_ROLES_FEATURE; -import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MIGRATION_FRAMEWORK; -import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED; -import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLE_MAPPING_CLEANUP; public class SecurityFeatures implements 
FeatureSpecification { @Override public Set getFeatures() { - return Set.of( - SECURITY_ROLE_MAPPING_CLEANUP, - SECURITY_ROLES_METADATA_FLATTENED, - SECURITY_MIGRATION_FRAMEWORK, - QUERYABLE_BUILT_IN_ROLES_FEATURE - ); + return Set.of(QUERYABLE_BUILT_IN_ROLES_FEATURE); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index 0a5865ecfe9bf..dc20b1e28ba78 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -96,7 +96,6 @@ import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecurityMigrations.ROLE_METADATA_FLATTENED_MIGRATION_VERSION; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; -import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED; /** * NativeRolesStore is a {@code RolesStore} that, instead of reading from a @@ -652,9 +651,7 @@ XContentBuilder createRoleXContentBuilder(RoleDescriptor role) throws IOExceptio XContentBuilder builder = jsonBuilder().startObject(); role.innerToXContent(builder, ToXContent.EMPTY_PARAMS, true); - if (featureService.clusterHasFeature(clusterService.state(), SECURITY_ROLES_METADATA_FLATTENED)) { - builder.field(RoleDescriptor.Fields.METADATA_FLATTENED.getPreferredName(), role.getMetadata()); - } + builder.field(RoleDescriptor.Fields.METADATA_FLATTENED.getPreferredName(), role.getMetadata()); // When role descriptor XContent is generated for the security index all empty fields need to have default values to make sure // existing values are overwritten if not present since the request to update could be an UpdateRequest diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 78f7209c06e3a..39c7a45d51dfd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -69,7 +69,6 @@ import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.State.UNRECOVERED_STATE; -import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MIGRATION_FRAMEWORK; /** * Manages the lifecycle, mapping and data upgrades/migrations of the {@code RestrictedIndicesNames#SECURITY_MAIN_ALIAS} @@ -539,7 +538,6 @@ public boolean isReadyForSecurityMigration(SecurityMigrations.SecurityMigration && state.indexAvailableForSearch && state.isIndexUpToDate && state.indexExists() - && state.securityFeatures.contains(SECURITY_MIGRATION_FRAMEWORK) && isEligibleSecurityMigration(securityMigration); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java index 203dec9e25b91..c380b9fbee93a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java @@ -142,7 +142,7 @@ private void updateRolesByQuery( @Override public Set nodeFeaturesRequired() { - return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); + return Set.of(); } @Override @@ -221,7 +221,7 @@ public boolean checkPreConditions(SecurityIndexManager.State securityIndexManage @Override public Set nodeFeaturesRequired() { - return Set.of(SecuritySystemIndices.SECURITY_ROLE_MAPPING_CLEANUP); + return Set.of(); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index bf3ecd1feeec4..806b545b85772 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.VersionId; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.ExecutorNames; import org.elasticsearch.indices.SystemIndexDescriptor; @@ -56,9 +55,6 @@ public class SecuritySystemIndices { public static final String INTERNAL_SECURITY_PROFILE_INDEX_8 = ".security-profile-8"; public static final String SECURITY_PROFILE_ALIAS = ".security-profile"; - public static final NodeFeature SECURITY_MIGRATION_FRAMEWORK = new NodeFeature("security.migration_framework", true); - public static final NodeFeature SECURITY_ROLES_METADATA_FLATTENED = new NodeFeature("security.roles_metadata_flattened", true); - public static final NodeFeature SECURITY_ROLE_MAPPING_CLEANUP = new NodeFeature("security.role_mapping_cleanup", true); /** * Security managed index mappings used to be updated based on the product version. 
They are now updated based on per-index mappings diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 29b8037de5a66..0c5e66344e2a2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -416,7 +416,7 @@ protected void deleteSecurityIndex() { ) ) ); - GetIndexRequest getIndexRequest = new GetIndexRequest(); + GetIndexRequest getIndexRequest = new GetIndexRequest(TEST_REQUEST_TIMEOUT); getIndexRequest.indices(SECURITY_MAIN_ALIAS); getIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); GetIndexResponse getIndexResponse = client.admin().indices().getIndex(getIndexRequest).actionGet(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 5eb9fb9b41a22..4b51e301da252 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -2033,6 +2033,9 @@ public void testInvalidToken() throws Exception { } else if (e instanceof NegativeArraySizeException) { assertThat(e.getMessage(), containsString("array size must be positive but was: ")); latch.countDown(); + } else if (e instanceof ElasticsearchException) { + assertThat(e.getMessage(), containsString(getTestName())); + latch.countDown(); } else { logger.error("unexpected exception", e); latch.countDown(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index c2e9a92e45353..d3b0d3b2e1faa 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -1391,7 +1391,7 @@ public void testScrollRelatedRequestsAllowed() { } public void testAuthorizeIndicesFailures() { - TransportRequest request = new GetIndexRequest().indices("b"); + TransportRequest request = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("b"); ClusterState state = mockEmptyMetadata(); RoleDescriptor role = new RoleDescriptor( "a_all", @@ -1726,7 +1726,7 @@ public void testDenialErrorMessagesForInvalidateApiKeyAction() { } public void testDenialForAnonymousUser() { - TransportRequest request = new GetIndexRequest().indices("b"); + TransportRequest request = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("b"); ClusterState state = mockEmptyMetadata(); Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "a_all").build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); @@ -1772,7 +1772,7 @@ public void testDenialForAnonymousUser() { } public void testDenialForAnonymousUserAuthorizationExceptionDisabled() { - TransportRequest request = new GetIndexRequest().indices("b"); + TransportRequest request = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("b"); ClusterState state = mockEmptyMetadata(); Settings settings = Settings.builder() 
.put(AnonymousUser.ROLES_SETTING.getKey(), "a_all") @@ -1825,7 +1825,7 @@ public void testDenialForAnonymousUserAuthorizationExceptionDisabled() { public void testAuditTrailIsRecordedWhenIndexWildcardThrowsError() { IndicesOptions options = IndicesOptions.fromOptions(false, false, true, true); - TransportRequest request = new GetIndexRequest().indices("not-an-index-*").indicesOptions(options); + TransportRequest request = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("not-an-index-*").indicesOptions(options); ClusterState state = mockEmptyMetadata(); RoleDescriptor role = new RoleDescriptor( "a_all", @@ -1929,7 +1929,7 @@ public void testRunAsRequestRunningAsUnAllowedUser() { } public void testRunAsRequestWithRunAsUserWithoutPermission() { - TransportRequest request = new GetIndexRequest().indices("a"); + TransportRequest request = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("a"); User authenticatedUser = new User("test user", "can run as"); final Authentication authentication = createAuthentication(new User("run as me", "b"), authenticatedUser); final RoleDescriptor runAsRole = new RoleDescriptor( @@ -1989,7 +1989,7 @@ public void testRunAsRequestWithRunAsUserWithoutPermission() { } public void testRunAsRequestWithValidPermissions() { - TransportRequest request = new GetIndexRequest().indices("b"); + TransportRequest request = new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("b"); User authenticatedUser = new User("test user", "can run as"); final Authentication authentication = createAuthentication(new User("run as me", "b"), authenticatedUser); final RoleDescriptor runAsRole = new RoleDescriptor( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java index e381663d4174e..b984295155c1f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java @@ -1016,6 +1016,7 @@ private TcpChannel getSingleChannel(Transport.Connection connection) { static class TestSecurityNetty4ServerTransport extends SecurityNetty4ServerTransport { private final boolean doHandshake; + private final TransportVersion version; TestSecurityNetty4ServerTransport( Settings settings, @@ -1043,6 +1044,7 @@ static class TestSecurityNetty4ServerTransport extends SecurityNetty4ServerTrans sharedGroupFactory, mock(CrossClusterAccessAuthenticationService.class) ); + this.version = version; this.doHandshake = doHandshake; } @@ -1056,7 +1058,7 @@ public void executeHandshake( if (doHandshake) { super.executeHandshake(node, channel, profile, listener); } else { - assert getVersion().equals(TransportVersion.current()); + assert version.equals(TransportVersion.current()); listener.onResponse(TransportVersions.MINIMUM_COMPATIBLE); } } diff --git a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle index 17996ce82a453..5668375403daf 100644 --- a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle +++ b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle @@ -28,7 +28,8 @@ tasks.named("forbiddenPatterns").configure { exclude '**/system_key' } -String outputDir = "${buildDir}/generated-resources/${project.name}" +def 
buildDirectory = layout.buildDirectory +String outputDir = "${buildDirectory.file("generated-resources/${project.name}").get().asFile}" tasks.register("copyTestNodeKeyMaterial", Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', @@ -40,15 +41,15 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> String oldVersion = bwcVersion.toString() // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests - String searchableSnapshotRepository = "${buildDir}/cluster/shared/searchable-snapshots-repo/${baseName}" - + String searchableSnapshotRepository = "${buildDirectory.file("cluster/shared/searchable-snapshots-repo/${baseName}").get().asFile}" + File repoFolder = buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile def baseCluster = testClusters.register(baseName) { testDistribution = "DEFAULT" versions = [oldVersion, project.version] numberOfNodes = 3 setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "['${buildDir}/cluster/shared/repo/${baseName}', '${searchableSnapshotRepository}']" + setting 'path.repo', "['${repoFolder}', '${searchableSnapshotRepository}']" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' @@ -107,15 +108,15 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster baseCluster mustRunAfter("precommit") dependsOn "copyTestNodeKeyMaterial" + def repoDir = buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") + delete(repoDir) delete("${searchableSnapshotRepository}") } - systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.path.searchable.snapshots.repo', searchableSnapshotRepository - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) } @@ -123,9 +124,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' @@ -137,9 +138,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 
'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' @@ -151,9 +152,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#twoThirdsUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index cc01d5b101106..3c7e9310744cb 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -237,7 +237,7 @@ public List getRestHandlers( } List> reservedClusterStateHandlers() { - return List.of(new ReservedSnapshotAction(featureService.get())); + return List.of(new ReservedSnapshotAction()); } @Override diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java index 274dec75865a8..ca4a632f7a76b 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java @@ -15,6 +15,6 @@ public class SnapshotLifecycleFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE); + return Set.of(); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index e53c167c57908..2c14e0804d1f5 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -20,8 +20,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.OperationModeUpdateTask; @@ -46,7 +44,6 @@ * task according to the policy's schedule. 
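The same cleanup runs through SLM: `SnapshotLifecycleFeatures.getFeatures()` above shrinks to `Set.of()`, and the `INTERVAL_SCHEDULE` constant plus its validation are deleted just below. For orientation, a hedged sketch of the `FeatureSpecification` shape involved; such classes are registered through the `META-INF/services/org.elasticsearch.features.FeatureSpecification` service-loader file (one such file is deleted later in this diff), and the class name here is hypothetical:

```java
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.Set;

// Once every feature a specification published is assumed on all supported
// nodes, the set becomes empty; if nothing else remains, the class and its
// SPI registration file can be removed together.
public class ExampleFeatures implements FeatureSpecification {
    @Override
    public Set<NodeFeature> getFeatures() {
        return Set.of(); // previously e.g. Set.of(SOME_FEATURE)
    }
}
```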
*/ public class SnapshotLifecycleService implements Closeable, ClusterStateListener { - public static final NodeFeature INTERVAL_SCHEDULE = new NodeFeature("slm.interval_schedule", true); private static final Logger logger = LogManager.getLogger(SnapshotLifecycleService.class); private static final String JOB_PATTERN_SUFFIX = "-\\d+$"; @@ -261,18 +258,6 @@ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecyc } } - /** - * Validate that interval schedule feature is not supported by all nodes - * @throws IllegalArgumentException if is interval expression but interval schedule not supported - */ - public static void validateIntervalScheduleSupport(String schedule, FeatureService featureService, ClusterState state) { - if (SnapshotLifecyclePolicy.isIntervalSchedule(schedule) && featureService.clusterHasFeature(state, INTERVAL_SCHEDULE) == false) { - throw new IllegalArgumentException( - "Unable to use slm interval schedules in mixed-clusters with nodes that do not support feature " + INTERVAL_SCHEDULE.id() - ); - } - } - @Override public void close() { if (this.running.compareAndSet(true, false)) { diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java index a98e110ed88de..b915f26310c14 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -41,12 +40,6 @@ public class ReservedSnapshotAction implements ReservedClusterStateHandler prepare(List { private static final Logger logger = LogManager.getLogger(TransportPutSnapshotLifecycleAction.class); - private final FeatureService featureService; @Inject public TransportPutSnapshotLifecycleAction( @@ -58,8 +56,7 @@ public TransportPutSnapshotLifecycleAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - FeatureService featureService + IndexNameExpressionResolver indexNameExpressionResolver ) { super( PutSnapshotLifecycleAction.NAME, @@ -72,7 +69,6 @@ public TransportPutSnapshotLifecycleAction( AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE ); - this.featureService = featureService; } @Override @@ -82,7 +78,6 @@ protected void masterOperation( final ClusterState state, final ActionListener listener ) { - SnapshotLifecycleService.validateIntervalScheduleSupport(request.getLifecycle().getSchedule(), featureService, state); SnapshotLifecycleService.validateRepositoryExists(request.getLifecycle().getRepository(), state); SnapshotLifecycleService.validateMinimumInterval(request.getLifecycle(), state); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java index 9955fe4cf0f95..b48a3be0728e8 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java +++ 
b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -50,7 +49,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -525,44 +523,6 @@ public void submitUnbatchedStateUpdateTask(String source, ClusterStateUpdateTask } } - public void testValidateIntervalScheduleSupport() { - var featureService = new FeatureService(List.of(new SnapshotLifecycleFeatures())); - { - ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")).add(DiscoveryNodeUtils.create("b"))) - .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) - .build(); - - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state) - ); - assertThat(e.getMessage(), containsString("Unable to use slm interval schedules")); - } - { - ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a"))) - .nodeFeatures(Map.of("a", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) - .build(); - try { - SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state); - } catch (Exception e) { - fail("interval schedule is supported by version and should not fail"); - } - } - { - ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")).add(DiscoveryNodeUtils.create("b"))) - .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) - .build(); - try { - SnapshotLifecycleService.validateIntervalScheduleSupport("*/1 * * * * ?", featureService, state); - } catch (Exception e) { - fail("cron schedule does not need feature check and should not fail"); - } - } - } - class FakeSnapshotTask extends SnapshotLifecycleTask { private final Consumer onTriggered; diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java index b993633e3d17d..e61e73d656ca7 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -81,7 +80,7 @@ private 
TransformState processJSON(ReservedSnapshotAction action, TransformState } public void testDependencies() { - var action = new ReservedSnapshotAction(mock(FeatureService.class)); + var action = new ReservedSnapshotAction(); assertThat(action.optionalDependencies(), contains(ReservedRepositoryAction.NAME)); } @@ -91,7 +90,7 @@ public void testValidationFailsNeitherScheduleOrInterval() { final ClusterName clusterName = new ClusterName("elasticsearch"); ClusterState state = ClusterState.builder(clusterName).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); + ReservedSnapshotAction action = new ReservedSnapshotAction(); TransformState prevState = new TransformState(state, Set.of()); String badPolicyJSON = """ @@ -119,56 +118,6 @@ public void testValidationFailsNeitherScheduleOrInterval() { ); } - public void testIntervalScheduleSupportValidation() { - Client client = mock(Client.class); - when(client.settings()).thenReturn(Settings.EMPTY); - final ClusterName clusterName = new ClusterName("elasticsearch"); - List repositoriesMetadata = List.of(new RepositoryMetadata("repo", "fs", Settings.EMPTY)); - Metadata.Builder mdBuilder = Metadata.builder(); - mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); - ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); - TransformState prevState = new TransformState(state, Set.of()); - String goodPolicyJSON = """ - { - "daily-snapshots": { - "schedule": "30d", - "name": "", - "repository": "repo", - "config": { - "indices": ["foo-*", "important"], - "ignore_unavailable": true, - "include_global_state": false - }, - "retention": { - "expire_after": "30d", - "min_count": 1, - "max_count": 50 - } - } - } - """; - - { - FeatureService featureService = mock(FeatureService.class); - when(featureService.clusterHasFeature(any(), any())).thenReturn(false); - ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); - assertThat( - expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, goodPolicyJSON)).getMessage(), - is("Error on validating SLM requests") - ); - } - { - FeatureService featureService = mock(FeatureService.class); - when(featureService.clusterHasFeature(any(), any())).thenReturn(true); - ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); - try { - processJSON(action, prevState, goodPolicyJSON); - } catch (Exception e) { - fail("interval schedule with interval feature should pass validation"); - } - } - } - public void testActionAddRemove() throws Exception { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); @@ -180,7 +129,7 @@ public void testActionAddRemove() throws Exception { mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); + ReservedSnapshotAction action = new ReservedSnapshotAction(); String emptyJSON = ""; @@ -414,7 +363,7 @@ public void testOperatorControllerFromJSONContent() throws IOException { null, List.of( new ReservedClusterSettingsAction(clusterSettings), - new ReservedSnapshotAction(mock(FeatureService.class)), + new ReservedSnapshotAction(), new ReservedRepositoryAction(repositoriesService) ) ); @@ -448,8 +397,7 @@ public void testPutSLMReservedStateHandler() throws Exception { 
mock(ClusterService.class), threadPool, mock(ActionFilters.class), - mock(IndexNameExpressionResolver.class), - mock(FeatureService.class) + mock(IndexNameExpressionResolver.class) ); assertThat(putAction.reservedStateHandlerName().get(), equalTo(ReservedSnapshotAction.NAME)); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml index be6929a15ff44..18707ea2d1d83 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml @@ -1,9 +1,5 @@ --- setup: - - requires: - cluster_features: "snapshot.repository_verify_integrity" - reason: "required feature" - - do: snapshot.create_repository: repository: test_repo diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java index 70385cdc4cf04..e2c3df252ae2e 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java @@ -17,8 +17,4 @@ exports org.elasticsearch.repositories.blobstore.testkit.analyze; exports org.elasticsearch.repositories.blobstore.testkit.integrity; - - provides org.elasticsearch.features.FeatureSpecification - with - org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKitFeatures; } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKitFeatures.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKitFeatures.java deleted file mode 100644 index cc513a948519b..0000000000000 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKitFeatures.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.repositories.blobstore.testkit; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -import static org.elasticsearch.repositories.blobstore.testkit.integrity.RestRepositoryVerifyIntegrityAction.REPOSITORY_VERIFY_INTEGRITY_FEATURE; - -public class SnapshotRepositoryTestKitFeatures implements FeatureSpecification { - @Override - public Set getFeatures() { - return Set.of(REPOSITORY_VERIFY_INTEGRITY_FEATURE); - } -} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java index b07358ab861a1..53db8efde9ead 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java @@ -8,7 +8,6 @@ package org.elasticsearch.repositories.blobstore.testkit.integrity; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -22,8 +21,6 @@ @ServerlessScope(Scope.INTERNAL) public class RestRepositoryVerifyIntegrityAction extends BaseRestHandler { - public static final NodeFeature REPOSITORY_VERIFY_INTEGRITY_FEATURE = new NodeFeature("snapshot.repository_verify_integrity", true); - @Override public List routes() { return List.of(new Route(POST, "/_snapshot/{repository}/_verify_integrity")); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/snapshot-repo-test-kit/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification deleted file mode 100644 index ae11c3bb39d0b..0000000000000 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ /dev/null @@ -1,8 +0,0 @@ -# -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License -# 2.0; you may not use this file except in compliance with the Elastic License -# 2.0. 
-# - -org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKitFeatures diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 5999a3ff1e151..394edc5df5ea5 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -9,8 +9,14 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.geo.GeoJson; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.MultiPoint; +import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.geometry.utils.WellKnownText; @@ -27,19 +33,28 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.legacygeo.mapper.LegacyGeoShapeFieldMapper; +import org.elasticsearch.legacygeo.mapper.LegacyGeoShapeFieldMapper.GeoShapeFieldType; import org.elasticsearch.test.index.IndexVersionUtils; -import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.ToXContent.MapParams; +import org.elasticsearch.xcontent.ToXContent.Params; +import org.elasticsearch.xpack.spatial.index.mapper.GeometricShapeSyntheticSourceSupport.FieldType; import org.junit.AssumptionViolatedException; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.legacygeo.mapper.LegacyGeoShapeFieldMapper.DEPRECATED_PARAMETERS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class GeoShapeWithDocValuesFieldMapperTests extends GeoFieldMapperTests { @@ -289,6 +304,75 @@ public void testInvalidCurrentVersion() { ); } + /** + * Test that we can parse legacy v7 "geo_shape" parameters for BWC with read-only N-2 indices + */ + public void testGeoShapeLegacyParametersParsing() throws Exception { + // deprecated parameters needed for bwc with read-only version 7 indices + assertEquals(Set.of("strategy", "tree", "tree_levels", "precision", "distance_error_pct", "points_only"), DEPRECATED_PARAMETERS); + + for (String deprecatedParam : DEPRECATED_PARAMETERS) { + Object value = switch (deprecatedParam) { + case "tree" -> randomFrom("quadtree", "geohash"); + case "tree_levels" -> 6; + case "distance_error_pct" -> "0.01"; + case "points_only" -> true; + case "strategy" -> "recursive"; + case "precision" -> "50m"; + default -> throw new IllegalStateException("Unexpected value: " + 
deprecatedParam); + }; + + // indices created before 8 should allow parameters but issue a warning + IndexVersion pre8version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); + MapperService m = createMapperService( + pre8version, + fieldMapping(b -> b.field("type", getFieldName()).field(deprecatedParam, value)) + ); + + // check mapper + Mapper mapper = m.mappingLookup().getMapper("field"); + assertThat(mapper, instanceOf(LegacyGeoShapeFieldMapper.class)); + + // check document parsing + MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); + assertNotNull(m.documentMapper().parse(source(b -> { + b.field("field"); + GeoJson.toXContent(multiPoint, b, null); + }))); + + // check for correct field type and that a query can be created + MappedFieldType fieldType = m.fieldType("field"); + assertThat(fieldType, instanceOf(GeoShapeFieldType.class)); + GeoShapeFieldType ft = (GeoShapeFieldType) fieldType; + + SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); + when(searchExecutionContext.allowExpensiveQueries()).thenReturn(true); + assertNotNull( + ft.geoShapeQuery(searchExecutionContext, "location", SpatialStrategy.TERM, ShapeRelation.INTERSECTS, new Point(-10, 10)) + ); + if (deprecatedParam.equals("strategy") == false) { + assertFieldWarnings(deprecatedParam, "strategy"); + } else { + assertFieldWarnings(deprecatedParam); + } + + // indices created after 8 should throw an error + IndexVersion post8version = IndexVersionUtils.randomCompatibleWriteVersion(random()); + Exception ex = expectThrows( + MapperParsingException.class, + () -> createMapperService(post8version, fieldMapping(b -> b.field("type", getFieldName()).field(deprecatedParam, value))) + ); + assertThat( + ex.getMessage(), + containsString( + "Failed to parse mapping: using deprecated parameters [" + + deprecatedParam + + "] in mapper [field] of type [geo_shape] is no longer allowed" + ) + ); + } + } + public void testGeoShapeLegacyMerge() throws Exception { IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); MapperService m = createMapperService(version, fieldMapping(b -> b.field("type", getFieldName()))); @@ -409,7 +493,7 @@ public void testSelfIntersectPolygon() throws IOException { public String toXContentString(AbstractShapeGeometryFieldMapper mapper, boolean includeDefaults) { if (includeDefaults) { - ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")); + Params params = new MapParams(Collections.singletonMap("include_defaults", "true")); return Strings.toString(mapper, params); } else { return Strings.toString(mapper); @@ -428,7 +512,7 @@ protected Object generateRandomInputValue(MappedFieldType ft) { @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { - return new GeometricShapeSyntheticSourceSupport(GeometricShapeSyntheticSourceSupport.FieldType.GEO_SHAPE, ignoreMalformed); + return new GeometricShapeSyntheticSourceSupport(FieldType.GEO_SHAPE, ignoreMalformed); } @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java deleted file mode 100644 index f26dba2f33982..0000000000000 --- 
a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.spatial.index.query; - -import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.GeoJson; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.geo.GeometryTestUtils; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.MultiPoint; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.index.mapper.DocumentParsingException; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.legacygeo.mapper.LegacyGeoShapeFieldMapper; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.geo.GeoShapeQueryTestCase; -import org.elasticsearch.test.index.IndexVersionUtils; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; - -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.index.query.QueryBuilders.geoIntersectionQuery; -import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; - -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") -public class LegacyGeoShapeWithDocValuesQueryTests extends GeoShapeQueryTestCase { - - @SuppressWarnings("deprecation") - private static final String[] PREFIX_TREES = new String[] { - LegacyGeoShapeFieldMapper.PrefixTrees.GEOHASH, - LegacyGeoShapeFieldMapper.PrefixTrees.QUADTREE }; - - @Override - protected Collection> getPlugins() { - return Collections.singleton(LocalStateSpatialPlugin.class); - } - - @Override - protected void createMapping(String indexName, String fieldName, Settings settings) throws Exception { - final XContentBuilder xcb = XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(fieldName) - .field("type", "geo_shape") - .field("tree", randomFrom(PREFIX_TREES)) - .endObject() - .endObject() - .endObject(); - - final Settings finalSetting; - MapperParsingException ex = expectThrows( - MapperParsingException.class, - () -> indicesAdmin().prepareCreate(indexName).setMapping(xcb).setSettings(settings).get() - ); - assertThat( - ex.getMessage(), - containsString("using deprecated parameters [tree] in mapper [" + fieldName + "] of type [geo_shape] is no longer allowed") - ); - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), 
IndexVersions.V_8_0_0); - finalSetting = settings(version).put(settings).build(); - indicesAdmin().prepareCreate(indexName).setMapping(xcb).setSettings(finalSetting).get(); - } - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testPointsOnlyExplicit() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(defaultFieldName) - .field("type", "geo_shape") - .field("tree", randomBoolean() ? "quadtree" : "geohash") - .field("tree_levels", "6") - .field("distance_error_pct", "0.01") - .field("points_only", true) - .endObject() - .endObject() - .endObject() - ); - - MapperParsingException ex = expectThrows( - MapperParsingException.class, - () -> indicesAdmin().prepareCreate("geo_points_only").setMapping(mapping).get() - ); - assertThat( - ex.getMessage(), - containsString( - "using deprecated parameters [points_only, tree, distance_error_pct, tree_levels] " - + "in mapper [geo] of type [geo_shape] is no longer allowed" - ) - ); - - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); - Settings settings = settings(version).build(); - indicesAdmin().prepareCreate("geo_points_only").setMapping(mapping).setSettings(settings).get(); - ensureGreen(); - - // MULTIPOINT - MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - prepareIndex("geo_points_only").setId("1") - .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - // POINT - Point point = GeometryTestUtils.randomPoint(false); - prepareIndex("geo_points_only").setId("2") - .setSource(GeoJson.toXContent(point, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - // test that point was inserted - assertHitCount(client().prepareSearch("geo_points_only").setQuery(matchAllQuery()), 2L); - } - - public void testPointsOnly() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(defaultFieldName) - .field("type", "geo_shape") - .field("tree", randomBoolean() ? 
"quadtree" : "geohash") - .field("tree_levels", "6") - .field("distance_error_pct", "0.01") - .field("points_only", true) - .endObject() - .endObject() - .endObject() - ); - - MapperParsingException ex = expectThrows( - MapperParsingException.class, - () -> indicesAdmin().prepareCreate("geo_points_only").setMapping(mapping).get() - ); - assertThat( - ex.getMessage(), - containsString( - "using deprecated parameters [points_only, tree, distance_error_pct, tree_levels] " - + "in mapper [geo] of type [geo_shape] is no longer allowed" - ) - ); - - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); - Settings settings = settings(version).build(); - indicesAdmin().prepareCreate("geo_points_only").setMapping(mapping).setSettings(settings).get(); - ensureGreen(); - - Geometry geometry = GeometryTestUtils.randomGeometry(false); - try { - prepareIndex("geo_points_only").setId("1") - .setSource(GeoJson.toXContent(geometry, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - } catch (DocumentParsingException e) { - // Random geometry generator created something other than a POINT type, verify the correct exception is thrown - assertThat(e.getMessage(), containsString("is configured for points only")); - return; - } - - // test that point was inserted - assertHitCount(client().prepareSearch("geo_points_only").setQuery(geoIntersectionQuery(defaultFieldName, geometry)), 1L); - } - - public void testFieldAlias() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(defaultFieldName) - .field("type", "geo_shape") - .field("tree", randomBoolean() ? "quadtree" : "geohash") - .endObject() - .startObject("alias") - .field("type", "alias") - .field("path", defaultFieldName) - .endObject() - .endObject() - .endObject() - ); - - MapperParsingException ex = expectThrows( - MapperParsingException.class, - () -> indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get() - ); - assertThat( - ex.getMessage(), - containsString("using deprecated parameters [tree] in mapper [geo] of type [geo_shape] is no longer allowed") - ); - - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); - Settings settings = settings(version).build(); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).setSettings(settings).get(); - ensureGreen(); - - MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - prepareIndex(defaultIndexName).setId("1") - .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - assertHitCount(client().prepareSearch(defaultIndexName).setQuery(geoShapeQuery("alias", multiPoint)), 1L); - } - - protected boolean ignoreLons(double[] lons) { - return Arrays.stream(lons).anyMatch(v -> v == 180); - } -} diff --git a/x-pack/plugin/sql/qa/jdbc/security/build.gradle b/x-pack/plugin/sql/qa/jdbc/security/build.gradle index 4248423d4ff43..bed7ff60107b2 100644 --- a/x-pack/plugin/sql/qa/jdbc/security/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/security/build.gradle @@ -54,18 +54,7 @@ subprojects { dependsOn copyTestClasses classpath += configurations.testArtifacts testClassesDirs = project.files(testArtifactsDir) - - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - 
TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def clusterInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(taskName) - it.parameters.service = serviceProvider - } - + def clusterInfo = getClusterInfo(taskName); nonInputProperties.systemProperty 'tests.audit.logfile', clusterInfo.map { it.auditLogs.get(0) } nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', clusterInfo.map { it.auditLogs.get(0).getParentFile().toString() + "/javaRestTest_audit-${new Date().format('yyyy-MM-dd')}-1.json" } diff --git a/x-pack/plugin/sql/qa/mixed-node/build.gradle b/x-pack/plugin/sql/qa/mixed-node/build.gradle index 35600fda0eb33..e081790981938 100644 --- a/x-pack/plugin/sql/qa/mixed-node/build.gradle +++ b/x-pack/plugin/sql/qa/mixed-node/build.gradle @@ -45,22 +45,23 @@ buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.3") && mustRunAfter("precommit") testClassesDirs = sourceSets.javaRestTest.output.classesDirs classpath = sourceSets.javaRestTest.runtimeClasspath + def beforeUpdateInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def afterUpdateInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } doFirst { def cluster = baseCluster.get() // Getting the endpoints causes a wait for the cluster - println "Endpoints are: ${-> cluster.allHttpSocketURI.join(",")}" + println "Endpoints are: ${-> beforeUpdateInfo.get()}" println "Upgrading one node to create a mixed cluster" - cluster.nextNodeToNextVersion() - - println "Upgrade complete, endpoints are: ${-> cluster.allHttpSocketURI.join(",")}" - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - + getRegistry().get().nextNodeToNextVersion(cluster) + println "Upgrade complete, endpoints are: ${-> afterUpdateInfo.get() }" } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + def bwcEnabled = project.bwc_tests_enabled + onlyIf("BWC tests disabled") { bwcEnabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/x-pack/plugin/sql/qa/server/security/build.gradle b/x-pack/plugin/sql/qa/server/security/build.gradle index 37ae3edaf51db..e00989cbaa89c 100644 --- a/x-pack/plugin/sql/qa/server/security/build.gradle +++ b/x-pack/plugin/sql/qa/server/security/build.gradle @@ -64,12 +64,7 @@ subprojects { TestClustersPlugin.REGISTRY_SERVICE_NAME ) - def clusterInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("javaRestTest") - it.parameters.service = serviceProvider - } - + def clusterInfo = getClusterInfo('javaRestTest') testClassesDirs += project.files(testArtifactsDir) classpath += configurations.testArtifacts nonInputProperties.systemProperty 'tests.audit.logfile', clusterInfo.map { it.auditLogs.get(0) } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml index 
2576a51e8b80e..3723f6cd4d450 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -61,10 +61,6 @@ aggregate_metric_double: --- aggregate_metric_double with ignore_malformed: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml index cffc161b11539..a77a5a775442a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml @@ -257,9 +257,6 @@ --- "mv_dedupe from index #104745": - - requires: - cluster_features: ["esql.mv_ordering_sorted_ascending"] - reason: "fixed by introducing a sorted, non-deduplicated MvOrdering" - do: indices.create: index: idx_with_multivalues diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml index 1567b6b556bdd..e7cda33896149 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml @@ -6,7 +6,7 @@ setup: - method: POST path: /_query parameters: [] - capabilities: [join_lookup_v10] + capabilities: [join_lookup_v11] reason: "uses LOOKUP JOIN" - do: indices.create: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml index ebf464ba667db..b9415bce62ea9 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml @@ -1,7 +1,5 @@ setup: - requires: - cluster_features: ["esql.metrics_counter_fields"] - reason: "require metrics counter fields" test_runner_features: allowed_warnings_regex - do: indices.create: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 3a989d2c87bf3..bfb49bdc7e5c5 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -574,10 +574,6 @@ setup: --- "values function": - - requires: - cluster_features: esql.agg_values - reason: "values is available in 8.14+" - - do: esql.query: body: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml index 731082378fe17..b7fdd16111db8 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_usage.yml @@ -1,8 +1,5 @@ --- logsdb usage: - - requires: - cluster_features: ["logsdb_telemetry_stats"] - reason: "requires stats" - do: indices.create: index: test1 diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml index 5e9faa84ee088..10556a49bcb4e 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/140_synthetic_source.yml @@ -5,10 +5,6 @@ setup: --- "geo_shape": - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test @@ -72,10 +68,6 @@ setup: --- "geo_shape with ignore_malformed": - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test @@ -156,10 +148,6 @@ setup: --- "shape": - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test @@ -223,10 +211,6 @@ setup: --- "shape with ignore_malformed": - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test @@ -424,8 +408,6 @@ setup: --- "geo_point with ignore_malformed": - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: introduced in 8.15.0 test_runner_features: close_to - do: @@ -504,10 +486,6 @@ setup: --- "point": - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test @@ -601,10 +579,6 @@ setup: --- "point with ignore_malformed": - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/20_ignore_above_stored_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/20_ignore_above_stored_source.yml index 252bafbdbe15a..12c2a9858a16b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/20_ignore_above_stored_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/20_ignore_above_stored_source.yml @@ -1,8 +1,5 @@ --- wildcard field type ignore_above: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml index 26beb3aa19075..5076e7556d911 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/wildcard/30_ignore_above_synthetic_source.yml @@ -5,9 +5,6 @@ setup: --- wildcard field type ignore_above: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java index d414d9be8d178..38171a7249bd8 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.checkpoint.RemoteClusterResolver.ResolvedIndices; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; @@ -281,7 +282,7 @@ private static void getCheckpointsFromOneClusterBWC( ActionListener> listener ) { // 1st get index to see the indexes the user has access to - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indices) + GetIndexRequest getIndexRequest = new GetIndexRequest(Transform.HARD_CODED_TRANSFORM_MASTER_NODE_TIMEOUT).indices(indices) .features(new GetIndexRequest.Feature[0]) .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java index 6268162625977..f087357921c68 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; +import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import java.time.Clock; @@ -66,7 +67,7 @@ private TransformIndex() {} * Returns {@code true} if the given index was created by the transform and {@code false} otherwise. 
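The transform changes here repeat the `GetIndexRequest` migration seen in the security and watcher tests earlier in this diff: the request is now constructed with an explicit master-node timeout rather than a no-arg constructor. A hedged sketch of the new call shape; the index name and the 30-second timeout are illustrative values, not what these callers actually use:

```java
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.core.TimeValue;

class GetIndexRequestSketch {
    static GetIndexRequest mappingsOnlyRequest() {
        // The no-arg constructor is gone; a master-node timeout is now required.
        return new GetIndexRequest(TimeValue.timeValueSeconds(30))
            .indices("my-index")                        // hypothetical index name
            .features(GetIndexRequest.Feature.MAPPINGS) // fetch mappings only
            .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN);
    }
}
```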
*/ public static void isDestinationIndexCreatedByTransform(Client client, String destIndex, ActionListener listener) { - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(destIndex) + GetIndexRequest getIndexRequest = new GetIndexRequest(Transform.HARD_CODED_TRANSFORM_MASTER_NODE_TIMEOUT).indices(destIndex) // We only need mappings, more specifically its "_meta" part .features(GetIndexRequest.Feature.MAPPINGS); executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, GetIndexAction.INSTANCE, getIndexRequest, ActionListener.wrap(getIndexResponse -> { diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 626e1e1e9e9d5..957a23e68a5dc 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -266,7 +266,7 @@ private void createWatcherIndicesOrAliases() throws Exception { } public void replaceWatcherIndexWithRandomlyNamedIndex(String originalIndexOrAlias, String to) { - GetIndexResponse index = indicesAdmin().prepareGetIndex().setIndices(originalIndexOrAlias).get(); + GetIndexResponse index = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(originalIndexOrAlias).get(); MappingMetadata mapping = index.getMappings().get(index.getIndices()[0]); Settings settings = index.getSettings().get(index.getIndices()[0]); diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java index 3f4b51b266a62..7a0bb73870739 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java @@ -62,7 +62,7 @@ public void testGetNotFound() throws Exception { // if the watches index is an alias, remove the alias randomly, otherwise the index if (randomBoolean()) { try { - GetIndexResponse indexResponse = indicesAdmin().prepareGetIndex().setIndices(Watch.INDEX).get(); + GetIndexResponse indexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(Watch.INDEX).get(); boolean isWatchIndexAlias = Watch.INDEX.equals(indexResponse.indices()[0]) == false; if (isWatchIndexAlias) { assertAcked(indicesAdmin().prepareAliases().removeAlias(indexResponse.indices()[0], Watch.INDEX)); diff --git a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml index 14e4a6f5aaef8..4eaf89d0bf67a 100644 --- a/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml +++ b/x-pack/plugin/wildcard/src/yamlRestTest/resources/rest-api-spec/test/30_synthetic_source.yml @@ -5,10 +5,6 @@ setup: --- synthetic source: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires synthetic source support - - do: indices.create: index: synthetic_source_test @@ -47,10 
+43,6 @@ synthetic source: --- synthetic source with copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: synthetic_source_test diff --git a/x-pack/qa/mixed-tier-cluster/build.gradle b/x-pack/qa/mixed-tier-cluster/build.gradle index bee28c47dc867..40454b2a290ce 100644 --- a/x-pack/qa/mixed-tier-cluster/build.gradle +++ b/x-pack/qa/mixed-tier-cluster/build.gradle @@ -40,14 +40,14 @@ buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") && tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { useCluster baseCluster mustRunAfter("precommit") + def beforeEndpoints = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } doFirst { // Getting the endpoints causes a wait for the cluster - println "Endpoints are: ${-> baseCluster.get().allHttpSocketURI.join(",")}" - baseCluster.get().nextNodeToNextVersion() - - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) + println "Endpoints are: ${-> beforeEndpoints.get()}" + getRegistry().get().nextNodeToNextVersion(baseCluster) } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) onlyIf("BWC tests disabled") { project.bwc_tests_enabled } } diff --git a/x-pack/qa/repository-old-versions/README.md b/x-pack/qa/repository-old-versions/README.md new file mode 100644 index 0000000000000..057877ecc99d6 --- /dev/null +++ b/x-pack/qa/repository-old-versions/README.md @@ -0,0 +1,16 @@ + +### Project repository-old-versions + +Test project for Lucene indices backward compatibility with versions before N-2 +(archive indices). + +The project aims to do the following: +1. Deploy a cluster on version 5 or 6 +2. Create an index, add a document, verify index integrity, create a snapshot +3. Deploy a cluster on the current version +4.
Restore the index and verify index integrity + + + + + diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java index 95bc92d4f185a..e594655ed21a6 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -25,6 +26,7 @@ import org.elasticsearch.core.Booleans; import org.elasticsearch.core.PathUtils; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; @@ -36,6 +38,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; @@ -80,9 +83,9 @@ public void setupIndex() throws IOException { String snapshotName = "snap"; List<String> indices; if (oldVersion.before(Version.fromString("6.0.0"))) { - indices = Arrays.asList("filebeat", "winlogbeat", "custom", "nested"); + indices = Arrays.asList("filebeat", "winlogbeat", "custom", "nested", "standard_token_filter"); } else { - indices = Arrays.asList("filebeat", "custom", "nested"); + indices = Arrays.asList("filebeat", "custom", "nested", "standard_token_filter"); } int oldEsPort = Integer.parseInt(System.getProperty("tests.es.port")); @@ -92,6 +95,20 @@ public void setupIndex() throws IOException { if (oldVersion.before(Version.fromString("6.0.0"))) { assertOK(oldEs.performRequest(createIndex("winlogbeat", "winlogbeat.json"))); } + assertOK( + oldEs.performRequest( + createIndex( + "standard_token_filter", + "standard_token_filter.json", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .put("index.analysis.analyzer.custom_analyzer.filter", "standard") + .build() + ) + ) + ); assertOK(oldEs.performRequest(createIndex("custom", "custom.json"))); assertOK(oldEs.performRequest(createIndex("nested", "nested.json"))); @@ -143,6 +160,12 @@ public void setupIndex() throws IOException { doc3.setJsonEntity(Strings.toString(bodyDoc3)); assertOK(oldEs.performRequest(doc3)); + Request doc4 = new Request("POST", "/" + "standard_token_filter" + "/" + "doc"); + doc4.addParameter("refresh", "true"); + XContentBuilder bodyDoc4 = XContentFactory.jsonBuilder().startObject().field("content", "Doc 1").endObject(); + doc4.setJsonEntity(Strings.toString(bodyDoc4)); + assertOK(oldEs.performRequest(doc4)); + // register repo on old ES and take snapshot Request createRepoRequest = new Request("PUT", "/_snapshot/" + repoName); createRepoRequest.setJsonEntity(Strings.format(""" @@ -174,15 +197,21 @@ public void setupIndex() throws IOException { }
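To make the README's snapshot-and-restore flow concrete, here is a minimal sketch in the same low-level `Request`/`performRequest` style this test already uses; the repository name, filesystem location, and `wait_for_completion` flags are illustrative assumptions, not values taken from this change.

```java
// Hypothetical sketch: "archive_repo", "snap" and the fs location are placeholders.
// On the version 5/6 cluster: register an fs repository and snapshot the index.
Request createRepo = new Request("PUT", "/_snapshot/archive_repo");
createRepo.setJsonEntity("{\"type\": \"fs\", \"settings\": {\"location\": \"/tmp/archive-repo\"}}");
assertOK(oldEs.performRequest(createRepo));

Request takeSnapshot = new Request("PUT", "/_snapshot/archive_repo/snap");
takeSnapshot.addParameter("wait_for_completion", "true");
takeSnapshot.setJsonEntity("{\"indices\": \"standard_token_filter\"}");
assertOK(oldEs.performRequest(takeSnapshot));

// On the current-version cluster: restore the snapshotted index, then query it.
Request restore = new Request("POST", "/_snapshot/archive_repo/snap/_restore");
restore.addParameter("wait_for_completion", "true");
restore.setJsonEntity("{\"indices\": \"standard_token_filter\"}");
assertOK(client().performRequest(restore));
```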
private Request createIndex(String indexName, String file) throws IOException { + return createIndex(indexName, file, Settings.EMPTY); + } + + private Request createIndex(String indexName, String file, Settings settings) throws IOException { Request createIndex = new Request("PUT", "/" + indexName); int numberOfShards = randomIntBetween(1, 3); - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject("settings") - .field("index.number_of_shards", numberOfShards) - .endObject() - .startObject("mappings"); + XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); + + builder.startObject("settings"); + builder.field(SETTING_NUMBER_OF_SHARDS, numberOfShards); + settings.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + builder.startObject("mappings"); builder.rawValue(OldMappingsIT.class.getResourceAsStream(file), XContentType.JSON); builder.endObject().endObject(); @@ -202,6 +231,21 @@ public void testMappingOk() throws IOException { } } + public void testStandardTokenFilter() throws IOException { + Request search = new Request("POST", "/" + "standard_token_filter" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("match_all") + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map<String, Object> response = entityAsMap(client().performRequest(search)); + List<?> hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + } + public void testSearchKeyword() throws IOException { Request search = new Request("POST", "/" + "custom" + "/_search"); XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) diff --git a/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json b/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json new file mode 100644 index 0000000000000..dfaab0dfd60e1 --- /dev/null +++ b/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json @@ -0,0 +1,8 @@ +"_default_": { + "properties": { + "content": { + "type": "text", + "analyzer": "custom_analyzer" + } + } +} diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 9a447f35eb13c..18feb654804be 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -33,7 +33,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) } @@ -42,9 +42,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster',
getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' @@ -55,9 +55,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' @@ -68,9 +68,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#twoThirdsUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle index ebcb4cd9760fe..b7b46d432823d 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle +++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle @@ -38,48 +38,44 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster baseLeaderCluster useCluster baseFollowerCluster systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') - + def baseClusterName = getName().substring(0, getName().lastIndexOf("#")).replace('#', '-') + def baseCluster = testClusters.named(baseClusterName) doFirst { - def baseCluster = testClusters.named("${baseName}-${kindExt}").get() if (name.endsWith("#clusterTest") == false) { println "Upgrade node $it" - baseCluster.nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.allHttpSocketURI.join(",")) - nonInputProperties.systemProperty('tests.clustername', baseName) - nonInputProperties.systemProperty('tests.leader_host', baseLeaderCluster.map(c->c.allHttpSocketURI.last())) - nonInputProperties.systemProperty('tests.leader_remote_cluster_seed', baseLeaderCluster.map(c -> c.allTransportPortURI.last())) - nonInputProperties.systemProperty('tests.follower_host', baseFollowerCluster.map(c -> c.allHttpSocketURI.last())) - nonInputProperties.systemProperty('tests.follower_remote_cluster_seed', baseFollowerCluster.map(c -> c.allTransportPortURI.last())) } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseCluster.name).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) + nonInputProperties.systemProperty('tests.leader_host', getClusterInfo(baseLeaderCluster.name).map { c->c.allHttpSocketURI.last() }) + nonInputProperties.systemProperty('tests.leader_remote_cluster_seed', 
getClusterInfo(baseLeaderCluster.name).map { c -> c.allTransportPortURI.last() }) + nonInputProperties.systemProperty('tests.follower_host', getClusterInfo(baseFollowerCluster.name).map { c->c.allHttpSocketURI.last() }) + nonInputProperties.systemProperty('tests.follower_remote_cluster_seed', getClusterInfo(baseFollowerCluster.name).map { c -> c.allTransportPortURI.last() }) } ["follower", "leader"].each { kind -> tasks.register("${baseName}#${kind}#clusterTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'none' systemProperty 'tests.rest.cluster_name', kind - ext.kindExt = kind } tasks.register("${baseName}#${kind}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'one_third' systemProperty 'tests.rest.cluster_name', kind dependsOn "${baseName}#leader#clusterTest", "${baseName}#follower#clusterTest" - ext.kindExt = kind } tasks.register("${baseName}#${kind}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'two_third' systemProperty 'tests.rest.cluster_name', kind dependsOn "${baseName}#${kind}#oneThirdUpgradedTest" - ext.kindExt = kind } tasks.register("${baseName}#${kind}#upgradedClusterTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'all' systemProperty 'tests.rest.cluster_name', kind dependsOn "${baseName}#${kind}#twoThirdsUpgradedTest" - ext.kindExt = kind } } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index f4c9f8f7ea2b0..e45571fd7056e 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -29,7 +29,7 @@ tasks.named("forbiddenPatterns").configure { exclude '**/system_key' } -String outputDir = "${buildDir}/generated-resources/${project.name}" +String outputDir = "${layout.buildDirectory.get().asFile}/generated-resources/${project.name}" tasks.register("copyTestNodeKeyMaterial", Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', @@ -41,7 +41,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> String oldVersion = bwcVersion.toString() // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests - String searchableSnapshotRepository = "${buildDir}/cluster/shared/searchable-snapshots-repo/${baseName}" + String searchableSnapshotRepository = "${layout.buildDirectory.get().asFile}/cluster/shared/searchable-snapshots-repo/${baseName}" def baseCluster = testClusters.register(baseName) { testDistribution = "DEFAULT" @@ -56,7 +56,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> } setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "['${buildDir}/cluster/shared/repo/${baseName}', '${searchableSnapshotRepository}']" + setting 'path.repo', "['${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}', '${searchableSnapshotRepository}']" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' @@ -125,14 +125,14 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> mustRunAfter("precommit") dependsOn "copyTestNodeKeyMaterial" doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") + delete("${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}") delete("${searchableSnapshotRepository}") } 
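A pattern worth noting across these build-script changes: eager `baseCluster.get().allHttpSocketURI` reads and `baseCluster.get().nextNodeToNextVersion()` calls are replaced by the lazy `getClusterInfo(...)` provider and the shared registry service, so cluster endpoints are only resolved when the task actually executes. A minimal sketch of the resulting shape, assuming hypothetical task and cluster names:

```groovy
tasks.register("myUpgradeTest", StandaloneRestIntegTestTask) {
  def myCluster = testClusters.named("myCluster")
  useCluster myCluster
  doFirst {
    // Upgrade one node through the registry service instead of the cluster object
    getRegistry().get().nextNodeToNextVersion(myCluster)
  }
  // Resolved lazily at execution time via the cluster-info provider
  nonInputProperties.systemProperty('tests.rest.cluster',
    getClusterInfo("myCluster").map { it.allHttpSocketURI.join(",") })
  nonInputProperties.systemProperty('tests.clustername', "myCluster")
}
```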
systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.path.searchable.snapshots.repo', searchableSnapshotRepository - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) // Disable ML tests for incompatible systems @@ -146,9 +146,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' @@ -183,9 +183,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' @@ -203,9 +203,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#twoThirdsUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java index 094ca9304a695..846c3c47a2714 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java @@ -26,7 +26,9 @@ import java.time.Instant; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.upgrades.IndexingIT.assertCount; import static org.hamcrest.Matchers.equalTo; @@ -256,6 +258,7 @@ private static void createAndRolloverDataStream(String dataStreamName, int numRo } private void upgradeDataStream(String dataStreamName, int numRolloversOnOldCluster) throws Exception { + Set<String> indicesNeedingUpgrade = getDataStreamIndices(dataStreamName); final int
explicitRolloverOnNewClusterCount = randomIntBetween(0, 2); for (int i = 0; i < explicitRolloverOnNewClusterCount; i++) { rollover(dataStreamName); @@ -292,10 +295,9 @@ private void upgradeDataStream(String dataStreamName, int numRolloversOnOldClust } else { // The number of rollovers that will have happened when we call reindex: final int rolloversPerformedByReindex = explicitRolloverOnNewClusterCount == 0 ? 1 : 0; - assertThat( - statusResponseMap.get("total_indices_in_data_stream"), - equalTo(originalWriteIndex + numRolloversOnOldCluster + explicitRolloverOnNewClusterCount + rolloversPerformedByReindex) - ); + final int expectedTotalIndicesInDataStream = originalWriteIndex + numRolloversOnOldCluster + + explicitRolloverOnNewClusterCount + rolloversPerformedByReindex; + assertThat(statusResponseMap.get("total_indices_in_data_stream"), equalTo(expectedTotalIndicesInDataStream)); /* * total_indices_requiring_upgrade is made up of: (the original write index) + numRolloversOnOldCluster. The number of * rollovers on the upgraded cluster is irrelevant since those will not be reindexed. @@ -305,6 +307,11 @@ private void upgradeDataStream(String dataStreamName, int numRolloversOnOldClust equalTo(originalWriteIndex + numRolloversOnOldCluster) ); assertThat(statusResponseMap.get("successes"), equalTo(numRolloversOnOldCluster + 1)); + // We expect all the original indices to have been deleted + for (String oldIndex : indicesNeedingUpgrade) { + assertThat(indexExists(oldIndex), equalTo(false)); + } + assertThat(getDataStreamIndices(dataStreamName).size(), equalTo(expectedTotalIndicesInDataStream)); } }, 60, TimeUnit.SECONDS); Request cancelRequest = new Request("POST", "_migration/reindex/" + dataStreamName + "/_cancel"); @@ -312,6 +319,16 @@ private void upgradeDataStream(String dataStreamName, int numRolloversOnOldClust assertOK(cancelResponse); } + @SuppressWarnings("unchecked") + private Set<String> getDataStreamIndices(String dataStreamName) throws IOException { + Response response = client().performRequest(new Request("GET", "_data_stream/" + dataStreamName)); + Map<String, Object> responseMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, response.getEntity().getContent(), false); + List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) responseMap.get("data_streams"); + Map<String, Object> dataStream = dataStreams.get(0); + List<Map<String, Object>> indices = (List<Map<String, Object>>) dataStream.get("indices"); + return indices.stream().map(index -> index.get("index_name").toString()).collect(Collectors.toSet()); + } + /* * Similar to isOriginalClusterCurrent, but returns true if the major versions of the clusters are the same. So true * for 8.6 and 8.17, but false for 7.17 and 8.18. diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java deleted file mode 100644 index 915122c97d3f1..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0.
- */ -package org.elasticsearch.upgrades; - -import org.elasticsearch.Version; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; - -import java.io.IOException; -import java.util.List; -import java.util.Locale; -import java.util.Map; - -import static org.elasticsearch.TransportVersions.V_8_15_0; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.containsInAnyOrder; - -public class SecurityIndexRoleMappingCleanupIT extends AbstractUpgradeTestCase { - private static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); - - public void testCleanupDuplicateMappings() throws Exception { - // see build.gradle where we set operator/settings.json for more details on this skip - assumeTrue( - "Cluster requires version higher than since operator/settings.json is only set then: " + Version.V_8_7_0, - UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) - ); - if (CLUSTER_TYPE == ClusterType.OLD) { - // If we're in a state where the same operator-defined role mappings can exist both in cluster state and the native store - // (V_8_15_0 transport added to security.role_mapping_cleanup feature added), create a state - // where the native store will need to be cleaned up - assumeTrue( - "Cleanup only needed before security.role_mapping_cleanup feature available in cluster", - clusterHasFeature("security.role_mapping_cleanup") == false - ); - assumeTrue( - "If role mappings are in cluster state but cleanup has not been performed yet, create duplicated role mappings", - minimumTransportVersion().onOrAfter(V_8_15_0) - ); - // Since the old cluster has role mappings in cluster state, but doesn't check duplicates, create duplicates - createNativeRoleMapping("operator_role_mapping_1", Map.of("meta", "test"), true); - createNativeRoleMapping("operator_role_mapping_2", Map.of("meta", "test"), true); - } else if (CLUSTER_TYPE == ClusterType.MIXED) { - // Create a native role mapping that doesn't conflict with anything before the migration run - createNativeRoleMapping("no_name_conflict", Map.of("meta", "test")); - } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { - waitForSecurityMigrationCompletion(adminClient(), 2); - assertAllRoleMappings( - client(), - "operator_role_mapping_1" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, - "operator_role_mapping_2" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, - "no_name_conflict" - ); - // In the old cluster we might have created these (depending on the node features), so make sure they were removed - assertFalse(roleMappingExistsInSecurityIndex("operator_role_mapping_1")); - assertFalse(roleMappingExistsInSecurityIndex("operator_role_mapping_2")); - assertTrue(roleMappingExistsInSecurityIndex("no_name_conflict")); - // Make sure we can create and delete a conflicting role mapping again - createNativeRoleMapping("operator_role_mapping_1", Map.of("meta", "test"), true); - deleteNativeRoleMapping("operator_role_mapping_1", true); - } - } - - @SuppressWarnings("unchecked") - private boolean roleMappingExistsInSecurityIndex(String mappingName) throws IOException { - final Request request = new Request("POST", 
"/.security/_search"); - request.setJsonEntity(String.format(Locale.ROOT, """ - {"query":{"bool":{"must":[{"term":{"_id":"%s_%s"}}]}}}""", "role-mapping", mappingName)); - - request.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7]," - + " but in a future major version, direct access to system indices will be prevented by default" - ) - ); - - Response response = adminClient().performRequest(request); - assertOK(response); - final Map responseMap = responseAsMap(response); - - Map hits = ((Map) responseMap.get("hits")); - return ((List) hits.get("hits")).isEmpty() == false; - } - - private void createNativeRoleMapping(String roleMappingName, Map metadata) throws IOException { - createNativeRoleMapping(roleMappingName, metadata, false); - } - - private void createNativeRoleMapping(String roleMappingName, Map metadata, boolean expectWarning) throws IOException { - final Request request = new Request("POST", "/_security/role_mapping/" + roleMappingName); - if (expectWarning) { - request.setOptions( - expectWarnings( - "A read-only role mapping with the same name [" - + roleMappingName - + "] has been previously defined in a configuration file. " - + "Both role mappings will be used to determine role assignments." - ) - ); - } - - BytesReference source = BytesReference.bytes( - jsonBuilder().map( - Map.of( - ExpressionRoleMapping.Fields.ROLES.getPreferredName(), - List.of("superuser"), - ExpressionRoleMapping.Fields.ENABLED.getPreferredName(), - true, - ExpressionRoleMapping.Fields.RULES.getPreferredName(), - Map.of("field", Map.of("username", "role-mapping-test-user")), - RoleDescriptor.Fields.METADATA.getPreferredName(), - metadata - ) - ) - ); - request.setJsonEntity(source.utf8ToString()); - assertOK(client().performRequest(request)); - } - - private void deleteNativeRoleMapping(String roleMappingName, boolean expectWarning) throws IOException { - final Request request = new Request("DELETE", "/_security/role_mapping/" + roleMappingName); - if (expectWarning) { - request.setOptions( - expectWarnings( - "A read-only role mapping with the same name [" - + roleMappingName - + "] has previously been defined in a configuration file. " - + "The native role mapping was deleted, but the read-only mapping will remain active " - + "and will be used to determine role assignments." - ) - ); - } - assertOK(client().performRequest(request)); - } - - private void assertAllRoleMappings(RestClient client, String... 
roleNames) throws IOException { - Request request = new Request("GET", "/_security/role_mapping"); - Response response = client.performRequest(request); - assertOK(response); - Map<String, Object> responseMap = responseAsMap(response); - - assertThat(responseMap.keySet(), containsInAnyOrder(roleNames)); - assertThat(responseMap.size(), is(roleNames.length)); - } -} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java index 6c34e68297aa0..df8290327ee5a 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java @@ -39,10 +39,6 @@ public void testRoleMigration() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { createRoleWithMetadata(oldTestRole, Map.of("meta", "test")); assertDocInSecurityIndex(oldTestRole); - if (canRolesBeMigrated() == false) { - assertNoMigration(adminClient()); - assertCannotQueryRolesByMetadata(client()); - } } else if (CLUSTER_TYPE == ClusterType.MIXED) { if (FIRST_MIXED_ROUND) { createRoleWithMetadata(mixed1TestRole, Map.of("meta", "test")); @@ -51,13 +47,8 @@ public void testRoleMigration() throws Exception { createRoleWithMetadata(mixed2TestRole, Map.of("meta", "test")); assertDocInSecurityIndex(mixed2TestRole); } - if (canRolesBeMigrated() == false) { - assertNoMigration(adminClient()); - assertCannotQueryRolesByMetadata(client()); - } } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { createRoleWithMetadata(upgradedTestRole, Map.of("meta", "test")); - assertTrue(canRolesBeMigrated()); waitForSecurityMigrationCompletion(adminClient(), 1); assertMigratedDocInSecurityIndex(oldTestRole, "meta", "test"); assertMigratedDocInSecurityIndex(mixed1TestRole, "meta", "test"); @@ -196,9 +187,4 @@ private void assertAllRoles(RestClient client, String... roleNames) throws IOExc assertThat(roles.get(i).get("name"), equalTo(roleNames[i])); } } - - private boolean canRolesBeMigrated() { - return clusterHasFeature("security.migration_framework") != false - && clusterHasFeature("security.roles_metadata_flattened") != false; - } -}