From 265c70423bb390c78d6d82527864653829379f4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Slobodan=20Adamovi=C4=87?= Date: Tue, 3 Sep 2024 09:58:18 +0200 Subject: [PATCH 001/115] [DOCS] Add missing ELASTIC_PASSWORD in docker-compose (#112372) This PR adds missing ELASTIC_PASSWORD environment variable to es02 and es03 nodes. Resolves https://github.com/elastic/elasticsearch/issues/112235 --- docs/reference/setup/install/docker/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/setup/install/docker/docker-compose.yml b/docs/reference/setup/install/docker/docker-compose.yml index 4b4ecf401b7d4..15d8c11e2f12f 100644 --- a/docs/reference/setup/install/docker/docker-compose.yml +++ b/docs/reference/setup/install/docker/docker-compose.yml @@ -117,6 +117,7 @@ services: - cluster.name=${CLUSTER_NAME} - cluster.initial_master_nodes=es01,es02,es03 - discovery.seed_hosts=es01,es03 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} - bootstrap.memory_lock=true - xpack.security.enabled=true - xpack.security.http.ssl.enabled=true @@ -156,6 +157,7 @@ services: - cluster.name=${CLUSTER_NAME} - cluster.initial_master_nodes=es01,es02,es03 - discovery.seed_hosts=es01,es02 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} - bootstrap.memory_lock=true - xpack.security.enabled=true - xpack.security.http.ssl.enabled=true From 9f98c44d10a8d27e0b56b15b8d8c966e32a35ab2 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 3 Sep 2024 18:07:30 +1000 Subject: [PATCH 002/115] Add 5 min initial delay for undesired allocation warning (#112427) When a cluster just starts up, all shards may reside on a single node. When a new node joins, it is likely half of the shards needs to relocate to the new node. This temporary undesired allocation is expected and should quickly resolve itself. This PR adds a 5 min initial delay so that the cluster does not log warning in such situation. 
Resolves: ES-9174 --- .../allocator/DesiredBalanceReconciler.java | 5 ++++- .../allocator/FrequencyCappedAction.java | 10 +++------- .../allocator/DesiredBalanceReconcilerTests.java | 10 ++++++++-- .../allocator/FrequencyCappedActionTests.java | 16 ++++++++++++---- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 24e7abca45d2d..3b1f6392ff7dc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -89,7 +89,10 @@ public class DesiredBalanceReconciler { private final DoubleGauge undesiredAllocationsRatio; public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, MeterRegistry meterRegistry) { - this.undesiredAllocationLogInterval = new FrequencyCappedAction(threadPool); + this.undesiredAllocationLogInterval = new FrequencyCappedAction( + threadPool.relativeTimeInMillisSupplier(), + TimeValue.timeValueMinutes(5) + ); clusterSettings.initializeAndWatch(UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, this.undesiredAllocationLogInterval::setMinInterval); clusterSettings.initializeAndWatch( UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java index 85ec145783023..2531655608ebb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java @@ -9,7 +9,6 @@ package 
org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.ThreadPool; import java.util.function.LongSupplier; @@ -21,15 +20,12 @@ public class FrequencyCappedAction { private final LongSupplier currentTimeMillisSupplier; private TimeValue minInterval; - private long next = -1; + private long next; - public FrequencyCappedAction(ThreadPool threadPool) { - this(threadPool.relativeTimeInMillisSupplier()); - } - - public FrequencyCappedAction(LongSupplier currentTimeMillisSupplier) { + public FrequencyCappedAction(LongSupplier currentTimeMillisSupplier, TimeValue initialDelay) { this.currentTimeMillisSupplier = currentTimeMillisSupplier; this.minInterval = TimeValue.MAX_VALUE; + this.next = currentTimeMillisSupplier.getAsLong() + initialDelay.getMillis(); } public void setMinInterval(TimeValue minInterval) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 615405645d6b2..326cda70dde82 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -1281,9 +1282,12 @@ public void testShouldLogOnTooManyUndesiredAllocations() { .build(); var threadPool = mock(ThreadPool.class); - when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new 
AtomicLong()::incrementAndGet); + final var timeInMillisSupplier = new AtomicLong(); + when(threadPool.relativeTimeInMillisSupplier()).thenReturn(timeInMillisSupplier::incrementAndGet); var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)); + final long initialDelayInMillis = TimeValue.timeValueMinutes(5).getMillis(); + timeInMillisSupplier.addAndGet(randomLongBetween(initialDelayInMillis, 2 * initialDelayInMillis)); var expectedWarningMessage = "[100%] of assigned shards (" + shardCount @@ -1323,7 +1327,9 @@ public void testShouldLogOnTooManyUndesiredAllocations() { } private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { - new DesiredBalanceReconciler(createBuiltInClusterSettings(), mock(ThreadPool.class), mock(MeterRegistry.class)).reconcile( + final var threadPool = mock(ThreadPool.class); + when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new AtomicLong()::incrementAndGet); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)).reconcile( desiredBalance, routingAllocation ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java index 0e730c3647db7..fa2b926f11a63 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import java.util.concurrent.atomic.AtomicLong; @@ -21,22 +22,29 @@ public void testFrequencyCapExecution() { var executions = new AtomicLong(0); var currentTime = new 
AtomicLong(); - var action = new FrequencyCappedAction(currentTime::get); + final TimeValue initialDelay = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueSeconds(between(1, 300)); + var action = new FrequencyCappedAction(currentTime::get, initialDelay); var minInterval = timeValueMillis(randomNonNegativeInt()); action.setMinInterval(minInterval); - // initial execution should happen action.maybeExecute(executions::incrementAndGet); + if (initialDelay != TimeValue.ZERO) { + // Not executing due to initial delay + assertThat(executions.get(), equalTo(0L)); + currentTime.addAndGet(randomLongBetween(initialDelay.millis(), initialDelay.millis() * 2)); + action.maybeExecute(executions::incrementAndGet); + } + // initial execution should happen assertThat(executions.get(), equalTo(1L)); // should not execute again too soon - currentTime.set(randomLongBetween(0, minInterval.millis() - 1)); + currentTime.addAndGet(randomLongBetween(0, minInterval.millis() - 1)); action.maybeExecute(executions::incrementAndGet); assertThat(executions.get(), equalTo(1L)); // should execute min interval elapsed - currentTime.set(randomLongBetween(minInterval.millis(), Long.MAX_VALUE)); + currentTime.addAndGet(randomLongBetween(minInterval.millis(), Long.MAX_VALUE)); action.maybeExecute(executions::incrementAndGet); assertThat(executions.get(), equalTo(2L)); } From 06aa32b312d922ac364fb94cb2ff322ac831827b Mon Sep 17 00:00:00 2001 From: Ido Cohen <90558359+CohenIdo@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:40:25 +0300 Subject: [PATCH 003/115] [Cloud Security][CDR] Update Wiz vuln privileges --- .../KibanaOwnedReservedRoleDescriptors.java | 99 ++++++++++++++----- .../authz/store/ReservedRolesStoreTests.java | 30 +++++- 2 files changed, 102 insertions(+), 27 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index a0fe3d09eccc7..36d0240ed765b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -30,7 +30,8 @@ import static org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore.getRemoteIndicesReadPrivileges; /** - * This exists in a separate file so it can be assigned to the Kibana security team in the CODEOWNERS file + * This exists in a separate file so it can be assigned to the Kibana security + * team in the CODEOWNERS file */ class KibanaOwnedReservedRoleDescriptors { @@ -87,9 +88,11 @@ static RoleDescriptor kibanaSystem(String name) { SuggestProfilesAction.NAME, ProfileHasPrivilegesAction.NAME, "write_fleet_secrets", - // To facilitate ML UI functionality being controlled using Kibana security privileges + // To facilitate ML UI functionality being controlled using Kibana security + // privileges "manage_ml", - // The symbolic constant for this one is in SecurityActionMapper, so not accessible from X-Pack core + // The symbolic constant for this one is in SecurityActionMapper, so not + // accessible from X-Pack core "cluster:admin/analyze", // To facilitate using the file uploader functionality "monitor_text_structure", @@ -104,7 +107,8 @@ static RoleDescriptor kibanaSystem(String name) { .build(), RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".management-beats").privileges("create_index", "read", "write").build(), - // To facilitate ML UI functionality being controlled using Kibana security privileges + // To facilitate ML UI functionality being controlled using Kibana security + // privileges 
RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", ".ml-stats-*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder() .indices(".ml-annotations*", ".ml-notifications*") @@ -139,12 +143,14 @@ static RoleDescriptor kibanaSystem(String name) { RoleDescriptor.IndicesPrivileges.builder().indices("traces-apm.*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("traces-apm-*").privileges("read", "read_cross_cluster").build(), - // Logstash telemetry queries of kibana task runner to access Logstash metric indices + // Logstash telemetry queries of kibana task runner to access Logstash metric + // indices RoleDescriptor.IndicesPrivileges.builder().indices("metrics-logstash.*").privileges("read").build(), // Data telemetry reads mappings, metadata and stats of indices RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("view_index_metadata", "monitor").build(), - // Endpoint diagnostic information. Kibana reads from these indices to send telemetry + // Endpoint diagnostic information. Kibana reads from these indices to send + // telemetry RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.diagnostic.collection-*").privileges("read").build(), // Fleet secrets. Kibana can only write to this index. RoleDescriptor.IndicesPrivileges.builder() @@ -152,7 +158,8 @@ static RoleDescriptor kibanaSystem(String name) { .privileges("write", "delete", "create_index") .allowRestrictedIndices(true) .build(), - // Other Fleet indices. Kibana reads and writes to these indices to manage Elastic Agents. + // Other Fleet indices. Kibana reads and writes to these indices to manage + // Elastic Agents. 
RoleDescriptor.IndicesPrivileges.builder() .indices(".fleet-actions*") .privileges("all") @@ -217,26 +224,32 @@ static RoleDescriptor kibanaSystem(String name) { .indices(ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) .privileges("all") .build(), - // "Alerts as data" internal backing indices used in Security Solution, Observability, etc. - // Kibana system user creates these indices; reads / writes to them via the aliases (see below). + // "Alerts as data" internal backing indices used in Security Solution, + // Observability, etc. + // Kibana system user creates these indices; reads / writes to them via the + // aliases (see below). RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_BACKING_INDEX).privileges("all").build(), - // "Alerts as data" public index aliases used in Security Solution, Observability, etc. + // "Alerts as data" public index aliases used in Security Solution, + // Observability, etc. // Kibana system user uses them to read / write alerts. RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_INDEX_ALIAS).privileges("all").build(), // "Alerts as data" public index alias used in Security Solution // Kibana system user uses them to read / write alerts. RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS).privileges("all").build(), // "Alerts as data" internal backing indices used in Security Solution - // Kibana system user creates these indices; reads / writes to them via the aliases (see below). + // Kibana system user creates these indices; reads / writes to them via the + // aliases (see below). RoleDescriptor.IndicesPrivileges.builder() .indices(ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS) .privileges("all") .build(), - // Endpoint / Fleet policy responses. Kibana requires read access to send telemetry + // Endpoint / Fleet policy responses. 
Kibana requires read access to send + // telemetry RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.policy-*").privileges("read").build(), // Endpoint metrics. Kibana requires read access to send telemetry RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.metrics-*").privileges("read").build(), - // Endpoint events. Kibana reads endpoint alert lineage for building and sending telemetry + // Endpoint events. Kibana reads endpoint alert lineage for building and sending + // telemetry RoleDescriptor.IndicesPrivileges.builder().indices("logs-endpoint.events.*").privileges("read").build(), // Fleet package install and upgrade RoleDescriptor.IndicesPrivileges.builder() @@ -261,39 +274,48 @@ static RoleDescriptor kibanaSystem(String name) { "indices:admin/data_stream/lifecycle/put" ) .build(), - // Endpoint specific action responses. Kibana reads and writes (for third party agents) to the index + // Endpoint specific action responses. Kibana reads and writes (for third party + // agents) to the index // to display action responses to the user. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.action.responses-*") .privileges("auto_configure", "read", "write") .build(), - // Endpoint specific actions. Kibana reads and writes to this index to track new actions and display them. + // Endpoint specific actions. Kibana reads and writes to this index to track new + // actions and display them. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.actions-*") .privileges("auto_configure", "read", "write") .build(), - // Legacy Osquery manager specific action responses. Kibana reads from these to display responses to the user. + // Legacy Osquery manager specific action responses. Kibana reads from these to + // display responses to the user. 
RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-osquery_manager.action.responses-*") .privileges("auto_configure", "create_index", "read", "index", "delete") .build(), - // Osquery manager specific action responses. Kibana reads from these to display responses to the user. + // Osquery manager specific action responses. Kibana reads from these to display + // responses to the user. RoleDescriptor.IndicesPrivileges.builder() .indices("logs-osquery_manager.action.responses-*") .privileges("read", "view_index_metadata") .build(), - // Osquery manager specific actions. Kibana reads and writes to this index to track new actions and display them. + // Osquery manager specific actions. Kibana reads and writes to this index to + // track new actions and display them. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-osquery_manager.actions-*") .privileges("auto_configure", "create_index", "read", "index", "write", "delete") .build(), - // Third party agent (that use non-Elastic Defend integrations) info logs indices. + // Third party agent (that use non-Elastic Defend integrations) info logs + // indices. // Kibana reads from these to display agent status/info to the user. - // These are indices that filebeat writes to, and the data in these indices are ingested by Fleet integrations - // in order to provide support for response actions related to malicious events for such agents. + // These are indices that filebeat writes to, and the data in these indices are + // ingested by Fleet integrations + // in order to provide support for response actions related to malicious events + // for such agents. 
RoleDescriptor.IndicesPrivileges.builder().indices("logs-sentinel_one.*", "logs-crowdstrike.*").privileges("read").build(), - // For ILM policy for APM, Endpoint, & Synthetics packages that have delete action + // For ILM policy for APM, Endpoint, & Synthetics packages that have delete + // action RoleDescriptor.IndicesPrivileges.builder() .indices( ".logs-endpoint.diagnostic.collection-*", @@ -332,7 +354,8 @@ static RoleDescriptor kibanaSystem(String name) { TransportUpdateSettingsAction.TYPE.name() ) .build(), - // For destination indices of the Threat Intel (ti_*) packages that ships a transform for supporting IOC expiration + // For destination indices of the Threat Intel (ti_*) packages that ships a + // transform for supporting IOC expiration RoleDescriptor.IndicesPrivileges.builder() .indices("logs-ti_*_latest.*") .privileges( @@ -346,7 +369,8 @@ static RoleDescriptor kibanaSystem(String name) { TransportUpdateSettingsAction.TYPE.name() ) .build(), - // For source indices of the Threat Intel (ti_*) packages that ships a transform for supporting IOC expiration + // For source indices of the Threat Intel (ti_*) packages that ships a transform + // for supporting IOC expiration RoleDescriptor.IndicesPrivileges.builder() .indices("logs-ti_*.*-*") .privileges( @@ -370,7 +394,8 @@ static RoleDescriptor kibanaSystem(String name) { TransportUpdateSettingsAction.TYPE.name() ) .build(), - // For src/dest indices of the Cloud Security Posture packages that ships a transform + // For src/dest indices of the Cloud Security Posture packages that ships a + // transform RoleDescriptor.IndicesPrivileges.builder() .indices("logs-cloud_security_posture.findings-*", "logs-cloud_security_posture.vulnerabilities-*") .privileges("read", "view_index_metadata") @@ -390,6 +415,27 @@ static RoleDescriptor kibanaSystem(String name) { TransportUpdateSettingsAction.TYPE.name() ) .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("logs-wiz.vulnerability-*") + 
.privileges("read", "view_index_metadata") + .build(), + RoleDescriptor.IndicesPrivileges.builder() + // manage privilege required by the index alias + .indices("security_solution-*.vulnerability_latest") + .privileges("manage", TransportIndicesAliasesAction.NAME, TransportUpdateSettingsAction.TYPE.name()) + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("security_solution-*.vulnerability_latest-*") + .privileges( + "create_index", + "index", + "manage", + "read", + "delete", + TransportIndicesAliasesAction.NAME, + TransportUpdateSettingsAction.TYPE.name() + ) + .build(), RoleDescriptor.IndicesPrivileges.builder().indices("risk-score.risk-*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder() .indices(".asset-criticality.asset-criticality-*") @@ -403,7 +449,8 @@ static RoleDescriptor kibanaSystem(String name) { // SLO observability solution internal indices // Kibana system user uses them to read / write slo data. RoleDescriptor.IndicesPrivileges.builder().indices(".slo-observability.*").privileges("all").build(), - // Endpoint heartbeat. Kibana reads from these to determine metering/billing for endpoints. + // Endpoint heartbeat. Kibana reads from these to determine metering/billing for + // endpoints. RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read").build(), // For connectors telemetry. 
Will be removed once we switched to connectors API RoleDescriptor.IndicesPrivileges.builder().indices(".elastic-connectors*").privileges("read").build() }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 0cdf7de63ca99..258b2378b8a1c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1609,13 +1609,41 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); + Arrays.asList("logs-wiz.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((cspIndex) -> { + final IndexAbstraction indexAbstraction = mockIndexAbstraction(cspIndex); + assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(indexAbstraction), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), + is(false) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), + is(false) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), 
is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); + }); + Arrays.asList( "logs-cloud_security_posture.findings_latest-default", "logs-cloud_security_posture.scores-default", "logs-cloud_security_posture.vulnerabilities_latest-default", "logs-cloud_security_posture.findings_latest-default-" + Version.CURRENT, "logs-cloud_security_posture.scores-default-" + Version.CURRENT, - "logs-cloud_security_posture.vulnerabilities_latest-default" + Version.CURRENT + "logs-cloud_security_posture.vulnerabilities_latest-default" + Version.CURRENT, + "security_solution-*.vulnerability_latest-" + Version.CURRENT ).forEach(indexName -> { logger.info("index name [{}]", indexName); final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName); From 90f1fb667c20ed98627e9ad1233dd3d6b7920a25 Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Tue, 3 Sep 2024 12:46:20 +0200 Subject: [PATCH 004/115] [ES|QL] Document return value for locate in case substring is not found (#112202) * Document return value for locate in case substring is not found * Add note that string positions start from 1 --- .../reference/esql/functions/description/locate.asciidoc | 2 +- .../esql/functions/kibana/definition/locate.json | 2 +- docs/reference/esql/functions/kibana/docs/locate.md | 2 ++ 
.../qa/testFixtures/src/main/resources/meta.csv-spec | 2 +- .../esql/expression/function/scalar/string/Locate.java | 9 ++++----- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/reference/esql/functions/description/locate.asciidoc b/docs/reference/esql/functions/description/locate.asciidoc index e5a6fba512432..b3f9d2a1ad78e 100644 --- a/docs/reference/esql/functions/description/locate.asciidoc +++ b/docs/reference/esql/functions/description/locate.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns an integer that indicates the position of a keyword substring within another string. +Returns an integer that indicates the position of a keyword substring within another string. Returns `0` if the substring cannot be found. Note that string positions start from `1`. diff --git a/docs/reference/esql/functions/kibana/definition/locate.json b/docs/reference/esql/functions/kibana/definition/locate.json index 2097c90b41958..a9ddc8c52368a 100644 --- a/docs/reference/esql/functions/kibana/definition/locate.json +++ b/docs/reference/esql/functions/kibana/definition/locate.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "locate", - "description" : "Returns an integer that indicates the position of a keyword substring within another string.", + "description" : "Returns an integer that indicates the position of a keyword substring within another string.\nReturns `0` if the substring cannot be found.\nNote that string positions start from `1`.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/locate.md b/docs/reference/esql/functions/kibana/docs/locate.md index 75275068d3096..412832e9b1587 100644 --- a/docs/reference/esql/functions/kibana/docs/locate.md +++ b/docs/reference/esql/functions/kibana/docs/locate.md @@ -4,6 +4,8 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../READ ### LOCATE Returns an integer that indicates the position of a keyword substring within another string. +Returns `0` if the substring cannot be found. +Note that string positions start from `1`. ``` row a = "hello" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index f1f66a9cb990c..325b984c36d34 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -284,7 +284,7 @@ ip_prefix |Truncates an IP to a given prefix length. least |Returns the minimum value from multiple columns. This is similar to <> except it is intended to run on multiple columns at once. left |Returns the substring that extracts 'length' chars from 'string' starting from the left. length |Returns the character length of a string. -locate |Returns an integer that indicates the position of a keyword substring within another string. +locate |Returns an integer that indicates the position of a keyword substring within another string. Returns `0` if the substring cannot be found. Note that string positions start from `1`. log |Returns the logarithm of a value to a base. The input can be any numeric value, the return value is always a double. Logs of zero, negative numbers, and base of one return `null` as well as a warning. log10 |Returns the logarithm of a value to base 10. The input can be any numeric value, the return value is always a double. Logs of 0 and negative numbers return `null` as well as a warning. ltrim |Removes leading whitespaces from a string. 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java index ea088bdc412e8..f6eff2fcbd6b3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java @@ -46,11 +46,10 @@ public class Locate extends EsqlScalarFunction implements OptionalArgument { private final Expression substr; private final Expression start; - @FunctionInfo( - returnType = "integer", - description = "Returns an integer that indicates the position of a keyword substring within another string.", - examples = @Example(file = "string", tag = "locate") - ) + @FunctionInfo(returnType = "integer", description = """ + Returns an integer that indicates the position of a keyword substring within another string. + Returns `0` if the substring cannot be found. 
+ Note that string positions start from `1`.""", examples = @Example(file = "string", tag = "locate")) public Locate( Source source, @Param(name = "string", type = { "keyword", "text" }, description = "An input string") Expression str, From 6d4d437cb109e3029ece465c78a78f63d2d8f4eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 3 Sep 2024 13:51:37 +0200 Subject: [PATCH 005/115] Mute MetadataCreateIndexServiceTests.testValidateDotIndex because of Lucene RegExp tilde operator changes --- .../metadata/MetadataCreateIndexServiceTests.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index f7d343b43b29c..0f2217fcb6936 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -8,6 +8,10 @@ package org.elasticsearch.cluster.metadata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.Operations; +import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.TransportVersion; @@ -612,9 +616,20 @@ public void testCalculateNumRoutingShards() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/112453") public void testValidateDotIndex() { List systemIndexDescriptors = new ArrayList<>(); systemIndexDescriptors.add(SystemIndexDescriptorUtils.createUnmanaged(".test-one*", "test")); + //TODO Lucene 10 upgrade + // The "~" operator in Rexeg Automata doesn't seem to work as expected any more without minimization + Automaton patternAutomaton = new 
RegExp("\\.test-~(one.*)").toAutomaton(); + assertTrue( + new CharacterRunAutomaton(Operations.determinize(patternAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)).run( + ".test-~(one.*)" + ) + ); + // TODO remove this smoke test ^^^ once the issue is fixed + systemIndexDescriptors.add(SystemIndexDescriptorUtils.createUnmanaged(".test-~(one*)", "test")); systemIndexDescriptors.add(SystemIndexDescriptorUtils.createUnmanaged(".pattern-test*", "test-1")); From fdddde5fa42d47ff55be6bd91d85310f51941611 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 3 Sep 2024 13:52:13 +0200 Subject: [PATCH 006/115] Revert "Mute MetadataCreateIndexServiceTests.testValidateDotIndex because of Lucene RegExp tilde operator changes" This reverts commit 6d4d437cb109e3029ece465c78a78f63d2d8f4eb. --- .../metadata/MetadataCreateIndexServiceTests.java | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 0f2217fcb6936..f7d343b43b29c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -8,10 +8,6 @@ package org.elasticsearch.cluster.metadata; -import org.apache.lucene.util.automaton.Automaton; -import org.apache.lucene.util.automaton.CharacterRunAutomaton; -import org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.TransportVersion; @@ -616,20 +612,9 @@ public void testCalculateNumRoutingShards() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/112453") public void testValidateDotIndex() { List 
systemIndexDescriptors = new ArrayList<>(); systemIndexDescriptors.add(SystemIndexDescriptorUtils.createUnmanaged(".test-one*", "test")); - //TODO Lucene 10 upgrade - // The "~" operator in Rexeg Automata doesn't seem to work as expected any more without minimization - Automaton patternAutomaton = new RegExp("\\.test-~(one.*)").toAutomaton(); - assertTrue( - new CharacterRunAutomaton(Operations.determinize(patternAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)).run( - ".test-~(one.*)" - ) - ); - // TODO remove this smoke test ^^^ once the issue is fixed - systemIndexDescriptors.add(SystemIndexDescriptorUtils.createUnmanaged(".test-~(one*)", "test")); systemIndexDescriptors.add(SystemIndexDescriptorUtils.createUnmanaged(".pattern-test*", "test-1")); From 301f3fb2ab65ee3e940b7b0099d394b0a3f1f087 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 3 Sep 2024 23:18:24 +1000 Subject: [PATCH 007/115] Mute org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests testAggregateIntermediate {TestCase= #2} #112461 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cfa67ba495324..c1df0a7bf6300 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -179,6 +179,9 @@ tests: - class: org.elasticsearch.xpack.spatial.index.query.LegacyGeoShapeWithDocValuesQueryTests method: testIndexPointsFromLine issue: https://github.com/elastic/elasticsearch/issues/112438 +- class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests + method: "testAggregateIntermediate {TestCase= #2}" + issue: https://github.com/elastic/elasticsearch/issues/112461 # Examples: # From 2a897d9e607cf4d86ce0fc69101e7e16af61a5aa Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 3 Sep 2024 23:18:45 +1000 Subject: [PATCH 008/115] Mute 
org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests testAggregateIntermediate {TestCase=} #112463 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c1df0a7bf6300..14dbba8dfdc0f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,6 +182,9 @@ tests: - class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests method: "testAggregateIntermediate {TestCase= #2}" issue: https://github.com/elastic/elasticsearch/issues/112461 +- class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests + method: testAggregateIntermediate {TestCase=} + issue: https://github.com/elastic/elasticsearch/issues/112463 # Examples: # From cf4182d68f2311d49c9a8b9a87cdde9bec164107 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 3 Sep 2024 17:08:29 +0200 Subject: [PATCH 009/115] ES|QL: add Telemetry API and track top functions (#111226) This adds `MeterRegistry` (ie. [APM telemetry](https://github.com/elastic/elasticsearch/blob/main/modules/apm/METERING.md)) to ESQL. It also adds logic to track most used commands (as it already did with the phone-nome API) and most used functions. The responsibility of collecting telemetry is now on EsqlSession, so that it can easily be delegated to lower level components if we want to collect further information. 
The legacy Telemetry is still there, untouched --- docs/changelog/111226.yaml | 5 + .../testFixtures/src/main/resources/README.md | 2 +- .../xpack/esql/action/TelemetryIT.java | 269 ++++++++++++++++++ .../xpack/esql/analysis/Analyzer.java | 6 +- .../xpack/esql/execution/PlanExecutor.java | 17 +- .../xpack/esql/parser/LogicalPlanBuilder.java | 8 +- .../xpack/esql/plan/logical/Aggregate.java | 8 + .../xpack/esql/plan/logical/Dissect.java | 5 + .../xpack/esql/plan/logical/Drop.java | 5 + .../xpack/esql/plan/logical/Enrich.java | 5 + .../xpack/esql/plan/logical/EsRelation.java | 5 + .../xpack/esql/plan/logical/Eval.java | 5 + .../xpack/esql/plan/logical/Explain.java | 5 + .../xpack/esql/plan/logical/Filter.java | 5 + .../xpack/esql/plan/logical/Grok.java | 5 + .../xpack/esql/plan/logical/InlineStats.java | 5 + .../xpack/esql/plan/logical/Keep.java | 5 + .../xpack/esql/plan/logical/Limit.java | 5 + .../xpack/esql/plan/logical/LogicalPlan.java | 2 + .../xpack/esql/plan/logical/Lookup.java | 5 + .../xpack/esql/plan/logical/MvExpand.java | 5 + .../xpack/esql/plan/logical/OrderBy.java | 5 + .../xpack/esql/plan/logical/Project.java | 8 + .../xpack/esql/plan/logical/Rename.java | 5 + .../xpack/esql/plan/logical/Row.java | 5 + .../xpack/esql/plan/logical/TopN.java | 7 + .../esql/plan/logical/UnresolvedRelation.java | 27 +- .../xpack/esql/plan/logical/join/Join.java | 5 + .../plan/logical/local/LocalRelation.java | 8 + .../esql/plan/logical/meta/MetaFunctions.java | 5 + .../esql/plan/logical/show/ShowInfo.java | 5 + .../xpack/esql/plugin/EsqlPlugin.java | 2 +- .../xpack/esql/session/EsqlSession.java | 42 +-- .../xpack/esql/stats/PlanningMetrics.java | 41 +++ .../esql/stats/PlanningMetricsManager.java | 94 ++++++ .../elasticsearch/xpack/esql/CsvTests.java | 4 +- .../xpack/esql/analysis/AnalyzerTests.java | 3 +- .../LocalLogicalPlanOptimizerTests.java | 5 + .../esql/parser/StatementParserTests.java | 12 +- .../xpack/esql/plan/logical/PhasedTests.java | 5 + 
.../esql/stats/PlanExecutorMetricsTests.java | 3 +- 41 files changed, 620 insertions(+), 53 deletions(-) create mode 100644 docs/changelog/111226.yaml create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetrics.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetricsManager.java diff --git a/docs/changelog/111226.yaml b/docs/changelog/111226.yaml new file mode 100644 index 0000000000000..1021a26fa789f --- /dev/null +++ b/docs/changelog/111226.yaml @@ -0,0 +1,5 @@ +pr: 111226 +summary: "ES|QL: add Telemetry API and track top functions" +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/README.md b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/README.md index 0f3a0c236eed9..7792a9e0dbc56 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/README.md +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/README.md @@ -188,7 +188,7 @@ It is preferable to use `EsqlCapabilities` for new features, although all existi ### Warnings -Some queries can return warnings, eg. for number overflows or when a multi-value is passed to a funciton +Some queries can return warnings, eg. for number overflows or when a multi-value is passed to a function that does not support it. Each CSV-SPEC test has to also assert all the expected warnings. diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java new file mode 100644 index 0000000000000..6dfc2401f5033 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TelemetryIT.java @@ -0,0 +1,269 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.xpack.esql.stats.PlanningMetricsManager; +import org.junit.Before; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class TelemetryIT extends AbstractEsqlIntegTestCase { + + record Test(String query, Map expectedCommands, Map expectedFunctions, boolean success) {} + + private final Test testCase; + + public TelemetryIT(@Name("TestCase") Test test) { + this.testCase = test; + } + + @ParametersFactory + public static Iterable parameters() { + return List.of( + new Object[] { + new Test( + """ + FROM idx + | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + | STATS s = COUNT(*) by ip + | KEEP ip + | EVAL a = 10""", + Map.ofEntries(Map.entry("FROM", 1), Map.entry("EVAL", 2), Map.entry("STATS", 1), Map.entry("KEEP", 1)), 
+ Map.ofEntries(Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2), Map.entry("COUNT", 1)), + true + ) }, + new Object[] { + new Test( + "FROM idx | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) " + + "| STATS s = COUNT(*) by ip | KEEP ip | EVAL a = non_existing", + Map.ofEntries(Map.entry("FROM", 1), Map.entry("EVAL", 2), Map.entry("STATS", 1), Map.entry("KEEP", 1)), + Map.ofEntries(Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2), Map.entry("COUNT", 1)), + false + ) }, + new Object[] { + new Test( + """ + FROM idx + | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + | STATS s = COUNT(*) by ip | KEEP ip | EVAL a = 10 + """, + Map.ofEntries(Map.entry("FROM", 1), Map.entry("EVAL", 3), Map.entry("STATS", 1), Map.entry("KEEP", 1)), + Map.ofEntries(Map.entry("TO_IP", 2), Map.entry("TO_STRING", 4), Map.entry("COUNT", 1)), + true + ) }, + new Object[] { + new Test( + """ + FROM idx | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + | WHERE id is not null AND id > 100 AND host RLIKE \".*foo\" + | eval a = 10 + | drop host + | rename a as foo + | DROP foo + """, // lowercase on purpose + Map.ofEntries( + Map.entry("FROM", 1), + Map.entry("EVAL", 2), + Map.entry("WHERE", 1), + Map.entry("DROP", 2), + Map.entry("RENAME", 1) + ), + Map.ofEntries(Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2)), + true + ) }, + new Object[] { + new Test( + """ + FROM idx + | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + | GROK host "%{WORD:name} %{WORD}" + | DISSECT host "%{surname}" + """, + Map.ofEntries(Map.entry("FROM", 1), Map.entry("EVAL", 1), Map.entry("GROK", 1), Map.entry("DISSECT", 1)), + Map.ofEntries(Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2)), + true + ) }, + new Object[] { + new Test("METRICS idx | LIMIT 10", Map.ofEntries(Map.entry("METRICS", 1), Map.entry("LIMIT", 1)), Map.ofEntries(), true) }, + new Object[] { + new Test( + "METRICS 
idx max(id) BY host | LIMIT 10", + Map.ofEntries(Map.entry("METRICS", 1), Map.entry("LIMIT", 1), Map.entry("FROM TS", 1)), + Map.ofEntries(Map.entry("MAX", 1)), + true + ) }, + new Object[] { + new Test( + """ + FROM idx + | EVAL ip = to_ip(host), x = to_string(host), y = to_string(host) + | INLINESTATS max(id) + """, + Map.ofEntries(Map.entry("FROM", 1), Map.entry("EVAL", 1), Map.entry("INLINESTATS", 1)), + Map.ofEntries(Map.entry("MAX", 1), Map.entry("TO_IP", 1), Map.entry("TO_STRING", 2)), + true + ) } + ); + } + + @Before + public void init() { + DiscoveryNode dataNode = randomDataNode(); + final String nodeName = dataNode.getName(); + loadData(nodeName); + } + + public void testMetrics() throws Exception { + DiscoveryNode dataNode = randomDataNode(); + testQuery(dataNode, testCase); + } + + private static void testQuery(DiscoveryNode dataNode, Test test) throws InterruptedException { + testQuery(dataNode, test.query, test.success, test.expectedCommands, test.expectedFunctions); + } + + private static void testQuery( + DiscoveryNode dataNode, + String query, + Boolean success, + Map expectedCommands, + Map expectedFunctions + ) throws InterruptedException { + final var plugins = internalCluster().getInstance(PluginsService.class, dataNode.getName()) + .filterPlugins(TestTelemetryPlugin.class) + .toList(); + assertThat(plugins, hasSize(1)); + TestTelemetryPlugin plugin = plugins.get(0); + + try { + int successIterations = randomInt(10); + for (int i = 0; i < successIterations; i++) { + EsqlQueryRequest request = executeQuery(query); + CountDownLatch latch = new CountDownLatch(1); + + final long iteration = i + 1; + client(dataNode.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.running(() -> { + try { + // test total commands used + final List commandMeasurementsAll = measurements(plugin, PlanningMetricsManager.FEATURE_METRICS_ALL); + assertAllUsages(expectedCommands, commandMeasurementsAll, iteration, success); + + // test num of queries 
using a command + final List commandMeasurements = measurements(plugin, PlanningMetricsManager.FEATURE_METRICS); + assertUsageInQuery(expectedCommands, commandMeasurements, iteration, success); + + // test total functions used + final List functionMeasurementsAll = measurements(plugin, PlanningMetricsManager.FUNCTION_METRICS_ALL); + assertAllUsages(expectedFunctions, functionMeasurementsAll, iteration, success); + + // test number of queries using a function + final List functionMeasurements = measurements(plugin, PlanningMetricsManager.FUNCTION_METRICS); + assertUsageInQuery(expectedFunctions, functionMeasurements, iteration, success); + } finally { + latch.countDown(); + } + })); + latch.await(30, TimeUnit.SECONDS); + } + } finally { + plugin.resetMeter(); + } + + } + + private static void assertAllUsages(Map expected, List metrics, long iteration, Boolean success) { + Set found = featureNames(metrics); + assertThat(found, is(expected.keySet())); + for (Measurement metric : metrics) { + assertThat(metric.attributes().get(PlanningMetricsManager.SUCCESS), is(success)); + String featureName = (String) metric.attributes().get(PlanningMetricsManager.FEATURE_NAME); + assertThat(metric.getLong(), is(iteration * expected.get(featureName))); + } + } + + private static void assertUsageInQuery(Map expected, List found, long iteration, Boolean success) { + Set functionsFound; + functionsFound = featureNames(found); + assertThat(functionsFound, is(expected.keySet())); + for (Measurement measurement : found) { + assertThat(measurement.attributes().get(PlanningMetricsManager.SUCCESS), is(success)); + assertThat(measurement.getLong(), is(iteration)); + } + } + + private static List measurements(TestTelemetryPlugin plugin, String metricKey) { + return Measurement.combine(plugin.getLongCounterMeasurement(metricKey)); + } + + private static Set featureNames(List functionMeasurements) { + return functionMeasurements.stream() + .map(x -> 
x.attributes().get(PlanningMetricsManager.FEATURE_NAME)) + .map(String.class::cast) + .collect(Collectors.toSet()); + } + + private static EsqlQueryRequest executeQuery(String query) { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(randomPragmas()); + return request; + } + + private static void loadData(String nodeName) { + int numDocs = randomIntBetween(1, 15); + assertAcked( + client().admin() + .indices() + .prepareCreate("idx") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", nodeName) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)) + ) + .setMapping("host", "type=keyword", "id", "type=long") + ); + for (int i = 0; i < numDocs; i++) { + client().prepareIndex("idx").setSource("host", "192." + i, "id", i).get(); + } + + client().admin().indices().prepareRefresh("idx").get(); + } + + private DiscoveryNode randomDataNode() { + return randomFrom(clusterService().state().nodes().getDataNodes().values()); + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), TestTelemetryPlugin.class); + } + +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 5baced5bc93f2..664c9bffb6499 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -190,7 +190,8 @@ protected LogicalPlan rule(UnresolvedRelation plan, AnalyzerContext context) { plan.frozen(), plan.metadataFields(), plan.indexMode(), - context.indexResolution().toString() + context.indexResolution().toString(), + plan.commandName() ); } TableIdentifier table = plan.table(); @@ -202,7 +203,8 @@ protected LogicalPlan rule(UnresolvedRelation plan, AnalyzerContext context) { 
plan.frozen(), plan.metadataFields(), plan.indexMode(), - "invalid [" + table + "] resolution to [" + context.indexResolution() + "]" + "invalid [" + table + "] resolution to [" + context.indexResolution() + "]", + plan.commandName() ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index 88dfb7a377aa9..441fd91ee6b35 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.execution; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.PreAnalyzer; import org.elasticsearch.xpack.esql.analysis.Verifier; @@ -22,6 +23,8 @@ import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.stats.Metrics; +import org.elasticsearch.xpack.esql.stats.PlanningMetrics; +import org.elasticsearch.xpack.esql.stats.PlanningMetricsManager; import org.elasticsearch.xpack.esql.stats.QueryMetric; import java.util.function.BiConsumer; @@ -36,14 +39,16 @@ public class PlanExecutor { private final Mapper mapper; private final Metrics metrics; private final Verifier verifier; + private final PlanningMetricsManager planningMetricsManager; - public PlanExecutor(IndexResolver indexResolver) { + public PlanExecutor(IndexResolver indexResolver, MeterRegistry meterRegistry) { this.indexResolver = indexResolver; this.preAnalyzer = new PreAnalyzer(); this.functionRegistry = new EsqlFunctionRegistry(); this.mapper = new Mapper(functionRegistry); this.metrics = new Metrics(); this.verifier = new Verifier(metrics); + 
this.planningMetricsManager = new PlanningMetricsManager(meterRegistry); } public void esql( @@ -54,6 +59,7 @@ public void esql( BiConsumer> runPhase, ActionListener listener ) { + final PlanningMetrics planningMetrics = new PlanningMetrics(); final var session = new EsqlSession( sessionId, cfg, @@ -63,13 +69,18 @@ public void esql( functionRegistry, new LogicalPlanOptimizer(new LogicalOptimizerContext(cfg)), mapper, - verifier + verifier, + planningMetrics ); QueryMetric clientId = QueryMetric.fromString("rest"); metrics.total(clientId); - session.execute(request, runPhase, wrap(listener::onResponse, ex -> { + session.execute(request, runPhase, wrap(x -> { + planningMetricsManager.publish(planningMetrics, true); + listener.onResponse(x); + }, ex -> { // TODO when we decide if we will differentiate Kibana from REST, this String value will likely come from the request metrics.failed(clientId); + planningMetricsManager.publish(planningMetrics, false); listener.onFailure(ex); })); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index ffd2375a688ad..cc6273d4de292 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -288,7 +288,8 @@ public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) { false, List.of(metadataMap.values().toArray(Attribute[]::new)), IndexMode.STANDARD, - null + null, + "FROM" ); } @@ -497,7 +498,7 @@ public LogicalPlan visitMetricsCommand(EsqlBaseParser.MetricsCommandContext ctx) TableIdentifier table = new TableIdentifier(source, null, visitIndexPattern(ctx.indexPattern())); if (ctx.aggregates == null && ctx.grouping == null) { - return new UnresolvedRelation(source, table, false, List.of(), IndexMode.STANDARD, null); + return 
new UnresolvedRelation(source, table, false, List.of(), IndexMode.STANDARD, null, "METRICS"); } final Stats stats = stats(source, ctx.grouping, ctx.aggregates); var relation = new UnresolvedRelation( @@ -506,7 +507,8 @@ public LogicalPlan visitMetricsCommand(EsqlBaseParser.MetricsCommandContext ctx) false, List.of(new MetadataAttribute(source, MetadataAttribute.TSID_FIELD, DataType.KEYWORD, false)), IndexMode.TIME_SERIES, - null + null, + "FROM TS" ); return new Aggregate(source, relation, Aggregate.AggregateType.METRICS, stats.groupings, stats.aggregates); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index 5b6fe8c0112c6..7ed2d04400be1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -124,6 +124,14 @@ public List aggregates() { return aggregates; } + @Override + public String commandName() { + return switch (aggregateType) { + case STANDARD -> "STATS"; + case METRICS -> "METRICS"; + }; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(groupings) && Resolvables.resolved(aggregates); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java index ae9805124890a..a83e102e51005 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java @@ -123,6 +123,11 @@ public boolean equals(Object o) { return Objects.equals(parser, dissect.parser); } + @Override + public String commandName() { + return "DISSECT"; + } + @Override public int hashCode() { return 
Objects.hash(super.hashCode(), parser); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java index 2df8727cf0e65..b32139c18e08e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java @@ -38,6 +38,11 @@ public List removals() { return removals; } + @Override + public String commandName() { + return "DROP"; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(removals); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java index a2b560d14ae21..762d5ffcc4532 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java @@ -191,6 +191,11 @@ public Mode mode() { return mode; } + @Override + public String commandName() { + return "ENRICH"; + } + @Override public boolean expressionsResolved() { return policyName.resolved() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java index b080c425d2312..0043362f23b87 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java @@ -140,6 +140,11 @@ public List output() { return attrs; } + @Override + public String commandName() { + return "FROM"; + } + @Override public boolean expressionsResolved() { // For unresolved expressions to exist in EsRelation is fine, as long as they are not used in later 
operations diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java index 2cecef42a42ac..6b217a7a81541 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java @@ -113,6 +113,11 @@ private List renameAliases(List originalAttributes, List n return newFieldsWithUpdatedRefs; } + @Override + public String commandName() { + return "EVAL"; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(fields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java index 009c9519a9fe5..38e7c19522df6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java @@ -69,6 +69,11 @@ public List output() { ); } + @Override + public String commandName() { + return "EXPLAIN"; + } + @Override public boolean expressionsResolved() { return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java index aca1a83344f32..611793b583d7a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java @@ -62,6 +62,11 @@ public Expression condition() { return condition; } + @Override + public String commandName() { + return "WHERE"; + } + @Override public boolean expressionsResolved() { return condition.resolved(); diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java index 2f1c468415aa3..fcfd1ac0f04da 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java @@ -148,6 +148,11 @@ public boolean equals(Object o) { return Objects.equals(parser, grok.parser); } + @Override + public String commandName() { + return "GROK"; + } + @Override public int hashCode() { return Objects.hash(super.hashCode(), parser); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index b37976c00ad06..dd71d1d85c8e2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -112,6 +112,11 @@ public List aggregates() { return aggregates; } + @Override + public String commandName() { + return "INLINESTATS"; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(groupings) && Resolvables.resolved(aggregates); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java index c1c8c9aff5ca6..4c03d68e6e6f7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java @@ -44,4 +44,9 @@ public int hashCode() { public boolean equals(Object obj) { return super.equals(obj); } + + @Override + public String commandName() { + return "KEEP"; + } } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java index 00b9ad3c2b6a2..ea64b7687f4c0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java @@ -57,6 +57,11 @@ public Expression limit() { return limit; } + @Override + public String commandName() { + return "LIMIT"; + } + @Override public boolean expressionsResolved() { return limit.resolved(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java index b94d60a9face2..df81d730bcf1b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java @@ -101,6 +101,8 @@ public boolean resolved() { return lazyResolved; } + public abstract String commandName(); + public abstract boolean expressionsResolved(); @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java index c0eaaa96b86d9..141d1a0945ddd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java @@ -111,6 +111,11 @@ public JoinConfig joinConfig() { return new JoinConfig(JoinType.LEFT, matchFields, leftFields, rightFields); } + @Override + public String commandName() { + return "LOOKUP"; + } + @Override public boolean expressionsResolved() { return tableName.resolved() && Resolvables.resolved(matchFields) && localRelation != null; diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java index 98eb5d3fcc451..8519ca0350b6e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java @@ -77,6 +77,11 @@ public Attribute expanded() { return expanded; } + @Override + public String commandName() { + return "MV_EXPAND"; + } + @Override public boolean expressionsResolved() { return target.resolved(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java index 9b79f9510feaf..8756cddbc3c97 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java @@ -63,6 +63,11 @@ public List order() { return order; } + @Override + public String commandName() { + return "SORT"; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(order); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java index e12a8cb557fde..841e7fbe81896 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java @@ -78,6 +78,14 @@ public boolean resolved() { return super.resolved() && Expressions.anyMatch(projections, Functions::isAggregate) == false; } + @Override + public String commandName() { + // this could represent multiple commands (KEEP, DROP, RENAME) + // and should not be present in a pre-analyzed plan. 
+ // maybe it should throw exception? + return ""; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(projections); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java index e587d7ad94d0e..773d3fd015e5f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java @@ -51,6 +51,11 @@ public List output() { return Expressions.asAttributes(projectionsAfterResolution); } + @Override + public String commandName() { + return "RENAME"; + } + @Override public boolean expressionsResolved() { for (var alias : renamings) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java index 0fe45e9182f0a..57cce3c26afc7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java @@ -46,6 +46,11 @@ public List output() { return Expressions.asAttributes(fields); } + @Override + public String commandName() { + return "ROW"; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(fields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java index a9a5dbddc544f..d6e0e4334bd47 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java @@ -55,6 +55,13 @@ public String getWriteableName() { return ENTRY.name; } + @Override + public String commandName() { + // this 
is the result of optimizations, it will never appear in a pre-analyzed plan + // maybe we should throw exception? + return ""; + } + @Override public boolean expressionsResolved() { return limit.resolved() && Resolvables.resolved(order); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java index 8414419529c47..fdc2321c8bef6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java @@ -31,13 +31,20 @@ public class UnresolvedRelation extends LeafPlan implements Unresolvable { private final IndexMode indexMode; private final String unresolvedMsg; + /** + * Used by telemetry to say if this is the result of a FROM command + * or a METRICS command (or maybe something else in the future) + */ + private final String commandName; + public UnresolvedRelation( Source source, TableIdentifier table, boolean frozen, List metadataFields, IndexMode indexMode, - String unresolvedMessage + String unresolvedMessage, + String commandName ) { super(source); this.table = table; @@ -45,6 +52,7 @@ public UnresolvedRelation( this.metadataFields = metadataFields; this.indexMode = indexMode; this.unresolvedMsg = unresolvedMessage == null ? 
"Unknown index [" + table.index() + "]" : unresolvedMessage; + this.commandName = commandName; } @Override @@ -59,7 +67,7 @@ public String getWriteableName() { @Override protected NodeInfo info() { - return NodeInfo.create(this, UnresolvedRelation::new, table, frozen, metadataFields, indexMode, unresolvedMsg); + return NodeInfo.create(this, UnresolvedRelation::new, table, frozen, metadataFields, indexMode, unresolvedMsg, commandName); } public TableIdentifier table() { @@ -75,6 +83,21 @@ public boolean resolved() { return false; } + /** + * + * This is used by {@link org.elasticsearch.xpack.esql.stats.PlanningMetrics} to collect query statistics + * It can return + *
+     * <ul>
+     *     <li>"FROM" if this is a {@code | FROM idx} command</li>
+     *     <li>"FROM TS" if it is the result of a {@code | METRICS idx some_aggs() BY fields} command</li>
+     *     <li>"METRICS" if it is the result of a {@code | METRICS idx} (no aggs, no groupings)</li>
+     * </ul>
+ */ + @Override + public String commandName() { + return commandName; + } + @Override public boolean expressionsResolved() { return false; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index 7ad1bcad2d9d0..b9004e3758c9b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -185,6 +185,11 @@ public boolean resolved() { return childrenResolved() && expressionsResolved(); } + @Override + public String commandName() { + return "JOIN"; + } + @Override public int hashCode() { return Objects.hash(config, left(), right()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java index d6106bae6b6b8..07432481d2341 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java @@ -63,6 +63,14 @@ public LocalSupplier supplier() { return supplier; } + @Override + public String commandName() { + // this colud be an empty source, a lookup table or something else + // but it should not be present in a pre-analyzed plan + // maybe we sholud throw exception? 
+ return ""; + } + @Override public boolean expressionsResolved() { return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java index 58c344219e6a9..029cb6164167c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java @@ -116,6 +116,11 @@ private static BytesRef asBytesRefOrNull(String string) { return Strings.hasText(string) ? new BytesRef(string) : null; } + @Override + public String commandName() { + return "META FUNCTIONS"; + } + @Override public boolean expressionsResolved() { return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java index 0fc30c96c809d..fa432537d27e3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java @@ -58,6 +58,11 @@ public List> values() { return List.of(row); } + @Override + public String commandName() { + return "SHOW"; + } + @Override public boolean expressionsResolved() { return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index f0686baf68f6f..c630051e79a26 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -114,7 +114,7 @@ public Collection createComponents(PluginServices services) { BlockFactory blockFactory = new 
BlockFactory(circuitBreaker, bigArrays, maxPrimitiveArrayBlockSize); setupSharedSecrets(); return List.of( - new PlanExecutor(new IndexResolver(services.client())), + new PlanExecutor(new IndexResolver(services.client()), services.telemetryProvider().getMeterRegistry()), new ExchangeService(services.clusterService().getSettings(), services.threadPool(), ThreadPool.Names.SEARCH, blockFactory), blockFactory ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 25d155ccfde07..29a32df8e6239 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.session; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.regex.Regex; @@ -31,7 +30,6 @@ import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -56,11 +54,11 @@ import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.Mapper; +import org.elasticsearch.xpack.esql.stats.PlanningMetrics; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.Set; 
import java.util.function.BiConsumer; import java.util.function.BiFunction; @@ -86,6 +84,7 @@ public class EsqlSession { private final Mapper mapper; private final PhysicalPlanOptimizer physicalPlanOptimizer; + private final PlanningMetrics planningMetrics; public EsqlSession( String sessionId, @@ -96,7 +95,8 @@ public EsqlSession( EsqlFunctionRegistry functionRegistry, LogicalPlanOptimizer logicalPlanOptimizer, Mapper mapper, - Verifier verifier + Verifier verifier, + PlanningMetrics planningMetrics ) { this.sessionId = sessionId; this.configuration = configuration; @@ -108,6 +108,7 @@ public EsqlSession( this.mapper = mapper; this.logicalPlanOptimizer = logicalPlanOptimizer; this.physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); + this.planningMetrics = planningMetrics; } public String sessionId() { @@ -191,6 +192,7 @@ public void analyzedPlan(LogicalPlan parsed, ActionListener listene } preAnalyze(parsed, (indices, policies) -> { + planningMetrics.gatherPreAnalysisMetrics(parsed); Analyzer analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indices, policies), verifier); var plan = analyzer.analyze(parsed); plan.setAnalyzed(); @@ -395,36 +397,4 @@ public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { LOGGER.debug("Optimized physical plan:\n{}", plan); return plan; } - - public static InvalidMappedField specificValidity(String fieldName, Map types) { - boolean hasUnmapped = types.containsKey(IndexResolver.UNMAPPED); - boolean hasTypeConflicts = types.size() > (hasUnmapped ? 
2 : 1); - String metricConflictsTypeName = null; - boolean hasMetricConflicts = false; - - if (hasTypeConflicts == false) { - for (Map.Entry type : types.entrySet()) { - if (IndexResolver.UNMAPPED.equals(type.getKey())) { - continue; - } - if (type.getValue().metricConflictsIndices() != null && type.getValue().metricConflictsIndices().length > 0) { - hasMetricConflicts = true; - metricConflictsTypeName = type.getKey(); - break; - } - } - } - - InvalidMappedField result = null; - if (hasMetricConflicts) { - StringBuilder errorMessage = new StringBuilder(); - errorMessage.append( - "mapped as different metric types in indices: [" - + String.join(", ", types.get(metricConflictsTypeName).metricConflictsIndices()) - + "]" - ); - result = new InvalidMappedField(fieldName, errorMessage.toString()); - } - return result; - }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetrics.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetrics.java new file mode 100644 index 0000000000000..7b452e50fd525 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetrics.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.stats; + +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * This class is responsible for collecting metrics related to ES|QL planning. 
+ */ +public class PlanningMetrics { + private Map commands = new HashMap<>(); + private Map functions = new HashMap<>(); + + public void gatherPreAnalysisMetrics(LogicalPlan plan) { + plan.forEachDown(p -> add(commands, p.commandName())); + plan.forEachExpressionDown(UnresolvedFunction.class, p -> add(functions, p.name().toUpperCase(Locale.ROOT))); + } + + private void add(Map map, String key) { + Integer cmd = map.get(key); + map.put(key, cmd == null ? 1 : cmd + 1); + } + + public Map commands() { + return commands; + } + + public Map functions() { + return functions; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetricsManager.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetricsManager.java new file mode 100644 index 0000000000000..a2d00a1f530e9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/PlanningMetricsManager.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.stats; + +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Map; + +/** + * This class is responsible for publishing metrics related to ES|QL planning. + * + * @see METERING + */ +public class PlanningMetricsManager { + + // APM counters + private final LongCounter featuresCounter; + private final LongCounter featuresCounterAll; + private final LongCounter functionsCounter; + private final LongCounter functionsCounterAll; + + public static String ESQL_PREFIX = "es.esql."; + public static String FEATURES_PREFIX = "commands."; + public static String FUNCTIONS_PREFIX = "functions."; + + /** + * Number of times a command is used. 
+ * If the command is used N times in a single query, this counter will be incremented by N + */ + public static final String FEATURE_METRICS_ALL = ESQL_PREFIX + FEATURES_PREFIX + "usages.total"; + + /** + * Queries that use a command. + * If a query uses a command N times, this will still be incremented by one only + */ + public static final String FEATURE_METRICS = ESQL_PREFIX + FEATURES_PREFIX + "queries.total"; + + /** + * Number of times a function is used. + * If the function is used N times in a single query, this counter will be incremented by N + */ + public static final String FUNCTION_METRICS_ALL = ESQL_PREFIX + FUNCTIONS_PREFIX + "usages.total"; + + /** + * Queries that use a command. + * If a query uses a command N times, this will still be incremented by one only + */ + public static final String FUNCTION_METRICS = ESQL_PREFIX + FUNCTIONS_PREFIX + "queries.total"; + public static final String FEATURE_NAME = "feature_name"; + + /** + * the query was executed successfully or not + */ + public static final String SUCCESS = "success"; + + public PlanningMetricsManager(MeterRegistry meterRegistry) { + featuresCounter = meterRegistry.registerLongCounter( + FEATURE_METRICS, + "ESQL features, total number of queries that use them", + "unit" + ); + featuresCounterAll = meterRegistry.registerLongCounter(FEATURE_METRICS_ALL, "ESQL features, total usage", "unit"); + functionsCounter = meterRegistry.registerLongCounter( + FUNCTION_METRICS, + "ESQL functions, total number of queries that use them", + "unit" + ); + functionsCounterAll = meterRegistry.registerLongCounter(FUNCTION_METRICS_ALL, "ESQL functions, total usage", "unit"); + } + + /** + * Publishes the collected metrics to the meter registry + */ + public void publish(PlanningMetrics metrics, boolean success) { + metrics.commands().entrySet().forEach(x -> incCommand(x.getKey(), x.getValue(), success)); + metrics.functions().entrySet().forEach(x -> incFunction(x.getKey(), x.getValue(), success)); + } + + 
private void incCommand(String name, int count, boolean success) { + this.featuresCounter.incrementBy(1, Map.ofEntries(Map.entry(FEATURE_NAME, name), Map.entry(SUCCESS, success))); + this.featuresCounterAll.incrementBy(count, Map.ofEntries(Map.entry(FEATURE_NAME, name), Map.entry(SUCCESS, success))); + } + + private void incFunction(String name, int count, boolean success) { + this.functionsCounter.incrementBy(1, Map.ofEntries(Map.entry(FEATURE_NAME, name), Map.entry(SUCCESS, success))); + this.functionsCounterAll.incrementBy(count, Map.ofEntries(Map.entry(FEATURE_NAME, name), Map.entry(SUCCESS, success))); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index e0f9b8690e289..a7d8c98a606b5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -85,6 +85,7 @@ import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.stats.DisabledSearchStats; +import org.elasticsearch.xpack.esql.stats.PlanningMetrics; import org.junit.After; import org.junit.Before; import org.mockito.Mockito; @@ -409,7 +410,8 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { functionRegistry, new LogicalPlanOptimizer(new LogicalOptimizerContext(configuration)), mapper, - TEST_VERIFIER + TEST_VERIFIER, + new PlanningMetrics() ); TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(testDataset); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index e4872b24558bc..b534320f60c10 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -95,7 +95,8 @@ public class AnalyzerTests extends ESTestCase { false, List.of(), IndexMode.STANDARD, - null + null, + "FROM" ); private static final int MAX_LIMIT = EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index 2756a0fbd8016..5c166e92ab152 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -243,6 +243,11 @@ public UnaryPlan replaceChild(LogicalPlan newChild) { return new MockFieldAttributeCommand(source(), newChild, field); } + @Override + public String commandName() { + return "MOCK"; + } + @Override public boolean expressionsResolved() { return true; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 6980171a7bcd7..0d4615b44aa35 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1670,12 +1670,20 @@ public void testInvalidAlias() { } private LogicalPlan unresolvedRelation(String index) { - return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, List.of(), IndexMode.STANDARD, null); + return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, List.of(), 
IndexMode.STANDARD, null, "FROM"); } private LogicalPlan unresolvedTSRelation(String index) { List metadata = List.of(new MetadataAttribute(EMPTY, MetadataAttribute.TSID_FIELD, DataType.KEYWORD, false)); - return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, metadata, IndexMode.TIME_SERIES, null); + return new UnresolvedRelation( + EMPTY, + new TableIdentifier(EMPTY, null, index), + false, + metadata, + IndexMode.TIME_SERIES, + null, + "FROM TS" + ); } public void testMetricWithGroupKeyAsAgg() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java index 5e45de6c77c42..edf75170adc63 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java @@ -110,6 +110,11 @@ public String getWriteableName() { throw new UnsupportedOperationException("not serialized"); } + @Override + public String commandName() { + return "DUMMY"; + } + @Override public boolean expressionsResolved() { throw new UnsupportedOperationException(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index d3795c9e9d953..cef04727bb8ed 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.fieldcaps.IndexFieldCapabilities; import org.elasticsearch.client.internal.Client; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -98,7 +99,7 @@ public void testFailedMetric() { return null; }).when(esqlClient).execute(eq(EsqlResolveFieldsAction.TYPE), any(), any()); - var planExecutor = new PlanExecutor(indexResolver); + var planExecutor = new PlanExecutor(indexResolver, MeterRegistry.NOOP); var enrichResolver = mockEnrichResolver(); var request = new EsqlQueryRequest(); From 111030b20dc13fad38a13575c1c9a7796323b7f8 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 4 Sep 2024 01:53:28 +1000 Subject: [PATCH 010/115] Mute org.elasticsearch.xpack.esql.action.ManyShardsIT testRejection #112406 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 14dbba8dfdc0f..78e7c92b2fa72 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -185,6 +185,9 @@ tests: - class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests method: testAggregateIntermediate {TestCase=} issue: https://github.com/elastic/elasticsearch/issues/112463 +- class: org.elasticsearch.xpack.esql.action.ManyShardsIT + method: testRejection + issue: https://github.com/elastic/elasticsearch/issues/112406 # Examples: # From f83d6bebd6256b9dc9c2c49e773b4794b06b48a0 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 4 Sep 2024 01:53:44 +1000 Subject: [PATCH 011/115] Mute org.elasticsearch.xpack.esql.action.ManyShardsIT testConcurrentQueries #112424 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 78e7c92b2fa72..9223b781b177d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -188,6 +188,9 @@ tests: - class: org.elasticsearch.xpack.esql.action.ManyShardsIT method: testRejection issue: 
https://github.com/elastic/elasticsearch/issues/112406 +- class: org.elasticsearch.xpack.esql.action.ManyShardsIT + method: testConcurrentQueries + issue: https://github.com/elastic/elasticsearch/issues/112424 # Examples: # From 46e73b86b1e66556f271127dc30b79bbeb6f3045 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 3 Sep 2024 18:12:24 +0200 Subject: [PATCH 012/115] ES|QL: Fix #112117 by skipping tests in v < 8.14 (#112468) Skipping some tests in v < 8.14 because of missing syntax in previous versions. Other tests with the same syntax (right above these two) have the same skip Fixes https://github.com/elastic/elasticsearch/issues/112117 Fixes https://github.com/elastic/elasticsearch/issues/112118 --- muted-tests.yml | 6 ------ .../esql/qa/testFixtures/src/main/resources/stats.csv-spec | 4 ++-- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 9223b781b177d..6f52c67ab5170 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -128,12 +128,6 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDeleteJobAfterMissingIndex issue: https://github.com/elastic/elasticsearch/issues/112088 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112117 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112118 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/preview_transforms/Test preview transform latest} issue: https://github.com/elastic/elasticsearch/issues/112144 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 3be846630d5b8..02a2cac0513c0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1618,7 +1618,7 @@ m:i | o:i | l:i | s:i 1 | 39729 | 1 | 39729 ; -byTwoCalculatedSecondOverwrites +byTwoCalculatedSecondSameNameAsFirst#[skip:-8.13.99,reason:supported in 8.14] FROM employees | STATS m = MAX(salary) by l = salary + 1, l = languages + 1 | SORT m @@ -1633,7 +1633,7 @@ FROM employees 74970 | 4 ; -byTwoCalculatedSecondOverwritesReferencingFirst +byTwoCalculatedSecondShadowingAndReferencingFirst#[skip:-8.13.99,reason:supported in 8.14] FROM employees | EVAL l = languages | STATS m = MAX(salary) by l = l + 1, l = l + 1 From a55cf5c08670b6fe91649d858e6361703bb108a5 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 3 Sep 2024 12:14:04 -0400 Subject: [PATCH 013/115] [ML] Create StreamingHttpResultPublisher (#112026) Adds streaming support to Inference API's Apache Client code by implementing an Apache HttpAsyncResponseConsumer. The ResponseConsumer will read response bytes as it gets them, converting them into an Inference HttpResult object. This leverages Java's Flow API to stream HttpResults from the Apache client back through the Inference API. The Flow API has the hooks and patterns to: 1. Send a stream of elements from source (Publisher) to consumer (Subscriber). 2. Control the rate that elements are sent, the Subscriber requests for the next element from the Publisher. 3. Bi-directional cancel methods, either the Publisher or the Subscriber can communicate when the stream is canceled. 4. Chain processing via Flow.Processor for when a Subscriber needs to mutate the elements from the Publisher, which we will do to convert the HttpResult to an InferenceServiceResults in a later change. 
Co-authored-by: Elastic Machine --- docs/changelog/112026.yaml | 5 + .../inference/external/http/HttpClient.java | 48 +- .../external/http/RequestBasedTaskRunner.java | 62 ++ .../http/StreamingHttpResultPublisher.java | 228 +++++++ .../external/request/HttpRequest.java | 6 + .../external/http/HttpClientTests.java | 56 +- .../http/RequestBasedTaskRunnerTests.java | 157 +++++ .../StreamingHttpResultPublisherTests.java | 632 ++++++++++++++++++ 8 files changed, 1188 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/112026.yaml create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunner.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunnerTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisherTests.java diff --git a/docs/changelog/112026.yaml b/docs/changelog/112026.yaml new file mode 100644 index 0000000000000..fedf001923ab4 --- /dev/null +++ b/docs/changelog/112026.yaml @@ -0,0 +1,5 @@ +pr: 112026 +summary: Create `StreamingHttpResultPublisher` +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java index 5ae137419b366..6b04b66cb7c11 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java @@ -13,6 +13,7 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import 
org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; +import org.apache.http.protocol.HttpContext; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; @@ -25,6 +26,7 @@ import java.io.IOException; import java.util.Objects; import java.util.concurrent.CancellationException; +import java.util.concurrent.Flow; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; @@ -143,10 +145,54 @@ private void respondUsingUtilityThread(HttpResponse response, HttpRequest reques }); } - private void failUsingUtilityThread(Exception exception, ActionListener listener) { + private void failUsingUtilityThread(Exception exception, ActionListener listener) { threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(() -> listener.onFailure(exception)); } + public void stream(HttpRequest request, HttpContext context, ActionListener> listener) throws IOException { + // The caller must call start() first before attempting to send a request + assert status.get() == Status.STARTED : "call start() before attempting to send a request"; + + // apache can sometimes send us the same error in the consumer and the callback + // sometimes it sends us an error just on the callback + // notifyOnce will dedupe for us + var callOnceListener = ActionListener.notifyOnce(listener); + + SocketAccess.doPrivileged( + () -> client.execute( + request.requestProducer(), + new StreamingHttpResultPublisher(threadPool, settings, callOnceListener), + context, + new FutureCallback<>() { + @Override + public void completed(HttpResponse response) { + // StreamingHttpResultPublisher will publish results to the Flow.Publisher returned in the ActionListener + } + + @Override + public void failed(Exception ex) { + throttlerManager.warn( + logger, + format("Request from inference entity id [%s] failed", 
request.inferenceEntityId()), + ex + ); + failUsingUtilityThread(ex, callOnceListener); + } + + @Override + public void cancelled() { + failUsingUtilityThread( + new CancellationException( + format("Request from inference entity id [%s] was cancelled", request.inferenceEntityId()) + ), + callOnceListener + ); + } + } + ) + ); + } + @Override public void close() throws IOException { status.set(Status.STOPPED); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunner.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunner.java new file mode 100644 index 0000000000000..85aac661e6091 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunner.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/** + *

<p>Runs a command synchronously on at most one thread. Threads make a request to run the command. If no thread is running the command, + * then the command will start on the provided {@link #threadPool}'s {@link #executorServiceName}. If a thread is currently running the + * command, then that thread is notified to rerun the command after it is finished.</p>
+ * + *
<p>This guarantees only one thread is working on a command at a given point in time.</p>
+ */ +class RequestBasedTaskRunner { + private final Runnable command; + private final ThreadPool threadPool; + private final String executorServiceName; + private final AtomicInteger loopCount = new AtomicInteger(0); + private final AtomicBoolean isRunning = new AtomicBoolean(true); + + RequestBasedTaskRunner(Runnable command, ThreadPool threadPool, String executorServiceName) { + this.command = Objects.requireNonNull(command); + this.threadPool = Objects.requireNonNull(threadPool); + this.executorServiceName = Objects.requireNonNull(executorServiceName); + } + + /** + * If there is currently a thread running in a loop, it should pick up this new request. + * If not, check if this thread is one of ours and reuse it. + * Else, offload to a new thread so we do not block another threadpool's thread. + */ + public void requestNextRun() { + if (loopCount.getAndIncrement() == 0) { + var currentThreadPool = EsExecutors.executorName(Thread.currentThread().getName()); + if (executorServiceName.equalsIgnoreCase(currentThreadPool)) { + run(); + } else { + threadPool.executor(executorServiceName).execute(this::run); + } + } + } + + public void cancel() { + isRunning.set(false); + } + + private void run() { + do { + command.run(); + } while (isRunning.get() && loopCount.decrementAndGet() > 0); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java new file mode 100644 index 0000000000000..49a9048a69df1 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java @@ -0,0 +1,228 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpResponse; +import org.apache.http.nio.ContentDecoder; +import org.apache.http.nio.IOControl; +import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.http.nio.util.SimpleInputBuffer; +import org.apache.http.protocol.HttpContext; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Deque; +import java.util.Objects; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.Flow; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +/** + *

<p>Streams responses from Apache's HttpAsyncResponseConsumer to Java's Flow.Publisher.</p>
+ * + *
<p>The ActionListener is called once when the HttpResponse is initially received to establish the Flow. All subsequent responses and + * errors will be sent through the Flow API. Consumers are expected to call #onSubscribe with a + * {@link java.util.concurrent.Flow.Subscriber} when the ActionListener is called. Consumers can then begin using the resulting + * {@link java.util.concurrent.Flow.Subscription} to request HttpResults from this publisher.</p>
+ * + *
<p>Consumers should expect the first HttpResult to represent the overall HttpResponse. Apache opens the channel with an HttpResponse + * before it starts sending response bytes. If the HttpResponse is an error, Apache may only send an HttpResponse with an HttpEntity, + * so this publisher will send a single HttpResult. If the HttpResponse is healthy, Apache will send an HttpResponse with or without + * the HttpEntity.</p>

+ */ +class StreamingHttpResultPublisher implements HttpAsyncResponseConsumer, Flow.Publisher { + private final HttpSettings settings; + private final ActionListener> listener; + + // used to manage the HTTP response + private volatile HttpResponse response; + private volatile Exception ex; + + // used to control the state of this publisher (Apache) and its interaction with its subscriber + private final AtomicBoolean isDone = new AtomicBoolean(false); + private final AtomicBoolean subscriptionCanceled = new AtomicBoolean(false); + private volatile Flow.Subscriber subscriber; + + private final RequestBasedTaskRunner taskRunner; + private final AtomicBoolean pendingRequest = new AtomicBoolean(false); + private final Deque queue = new ConcurrentLinkedDeque<>(); + + // used to control the flow of data from the Apache client, if we're producing more bytes than we can consume then we'll pause + private final AtomicLong bytesInQueue = new AtomicLong(0); + private final Object ioLock = new Object(); + private volatile IOControl savedIoControl; + + StreamingHttpResultPublisher(ThreadPool threadPool, HttpSettings settings, ActionListener> listener) { + this.settings = Objects.requireNonNull(settings); + this.listener = Objects.requireNonNull(listener); + + this.taskRunner = new RequestBasedTaskRunner(new OffloadThread(), threadPool, UTILITY_THREAD_POOL_NAME); + } + + @Override + public void responseReceived(HttpResponse httpResponse) throws IOException { + this.response = httpResponse; + var firstResponse = HttpResult.create(settings.getMaxResponseSize(), response); + this.queue.offer(() -> subscriber.onNext(firstResponse)); + this.listener.onResponse(this); + } + + @Override + public void subscribe(Flow.Subscriber subscriber) { + if (this.subscriber != null) { + subscriber.onError(new IllegalStateException("Only one subscriber is allowed for this Publisher.")); + return; + } + + this.subscriber = subscriber; + subscriber.onSubscribe(new HttpSubscription()); + } + + 
@Override + public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) throws IOException { + // if the subscriber canceled us, tell Apache + if (subscriptionCanceled.get()) { + ioControl.shutdown(); + return; + } + + var buffer = new SimpleInputBuffer(4096); + var consumed = buffer.consumeContent(contentDecoder); + var allBytes = new byte[consumed]; + buffer.read(allBytes); + + // we can have empty bytes, don't bother sending them + if (allBytes.length > 0) { + queue.offer(() -> { + subscriber.onNext(new HttpResult(response, allBytes)); + var currentBytesInQueue = bytesInQueue.updateAndGet(current -> Long.max(0, current - allBytes.length)); + if (savedIoControl != null) { + var maxBytes = settings.getMaxResponseSize().getBytes() * 0.5; + if (currentBytesInQueue <= maxBytes) { + resumeProducer(); + } + } + }); + } + + // always check if totalByteSize > the configured setting in case the settings change + if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { + pauseProducer(ioControl); + } + + // always run in case we're waking up from a pause and need to start a new thread + taskRunner.requestNextRun(); + } + + private void pauseProducer(IOControl ioControl) { + ioControl.suspendInput(); + synchronized (ioLock) { + savedIoControl = ioControl; + } + } + + private void resumeProducer() { + synchronized (ioLock) { + if (savedIoControl != null) { + savedIoControl.requestInput(); + savedIoControl = null; + } + } + } + + @Override + public void responseCompleted(HttpContext httpContext) {} + + // called when Apache is failing the response + @Override + public void failed(Exception e) { + if (this.isDone.compareAndSet(false, true)) { + ex = e; + queue.offer(() -> subscriber.onError(e)); + taskRunner.requestNextRun(); + } + } + + // called when Apache is done with the response + @Override + public void close() { + if (isDone.compareAndSet(false, true)) { + queue.offer(() -> subscriber.onComplete()); 
+ taskRunner.requestNextRun(); + } + } + + // called when Apache is canceling the response + @Override + public boolean cancel() { + close(); + return true; + } + + @Override + public Exception getException() { + return ex; + } + + @Override + public HttpResponse getResult() { + return response; + } + + @Override + public boolean isDone() { + return isDone.get(); + } + + private class HttpSubscription implements Flow.Subscription { + @Override + public void request(long n) { + if (subscriptionCanceled.get()) { + return; + } + + if (n > 0) { + pendingRequest.set(true); + taskRunner.requestNextRun(); + } else { + // per Subscription's spec, fail the subscriber and stop the processor + cancel(); + subscriber.onError(new IllegalArgumentException("Subscriber requested a non-positive number " + n)); + } + } + + @Override + public void cancel() { + if (subscriptionCanceled.compareAndSet(false, true)) { + taskRunner.cancel(); + } + } + } + + private class OffloadThread implements Runnable { + @Override + public void run() { + if (subscriptionCanceled.get()) { + return; + } + + if (queue.isEmpty() == false && pendingRequest.compareAndSet(true, false)) { + var next = queue.poll(); + if (next != null) { + next.run(); + } else { + pendingRequest.set(true); + } + } + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/HttpRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/HttpRequest.java index d81d16d6cbe10..ce72cb8b0b65a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/HttpRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/HttpRequest.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.inference.external.request; import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.nio.client.methods.HttpAsyncMethods; +import 
org.apache.http.nio.protocol.HttpAsyncRequestProducer; import java.util.Objects; @@ -19,4 +21,8 @@ public record HttpRequest(HttpRequestBase httpRequestBase, String inferenceEntit Objects.requireNonNull(httpRequestBase); Objects.requireNonNull(inferenceEntityId); } + + public HttpAsyncRequestProducer requestProducer() { + return HttpAsyncMethods.create(httpRequestBase); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java index 3fb6d14e66674..aa27bf0d2fc81 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.inference.external.http; import org.apache.http.HttpHeaders; -import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.protocol.HttpClientContext; @@ -19,6 +18,7 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; +import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.reactor.IOReactorException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.PlainActionFuture; @@ -41,6 +41,7 @@ import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; import java.util.concurrent.CancellationException; +import java.util.concurrent.Flow; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; @@ -116,8 +117,7 @@ public void testSend_FailedCallsOnFailure() throws Exception { var asyncClient = 
mock(CloseableHttpAsyncClient.class); doAnswer(invocation -> { - @SuppressWarnings("unchecked") - FutureCallback listener = (FutureCallback) invocation.getArguments()[2]; + FutureCallback listener = invocation.getArgument(2); listener.failed(new ElasticsearchException("failure")); return mock(Future.class); }).when(asyncClient).execute(any(HttpUriRequest.class), any(), any()); @@ -139,8 +139,7 @@ public void testSend_CancelledCallsOnFailure() throws Exception { var asyncClient = mock(CloseableHttpAsyncClient.class); doAnswer(invocation -> { - @SuppressWarnings("unchecked") - FutureCallback listener = (FutureCallback) invocation.getArguments()[2]; + FutureCallback listener = invocation.getArgument(2); listener.cancelled(); return mock(Future.class); }).when(asyncClient).execute(any(HttpUriRequest.class), any(), any()); @@ -161,6 +160,53 @@ public void testSend_CancelledCallsOnFailure() throws Exception { } } + public void testStream_FailedCallsOnFailure() throws Exception { + var asyncClient = mock(CloseableHttpAsyncClient.class); + + doAnswer(invocation -> { + FutureCallback listener = invocation.getArgument(3); + listener.failed(new ElasticsearchException("failure")); + return mock(Future.class); + }).when(asyncClient).execute(any(HttpAsyncRequestProducer.class), any(), any(), any()); + + var httpPost = createHttpPost(webServer.getPort(), "a", "b"); + + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, threadPool, mockThrottlerManager())) { + client.start(); + + PlainActionFuture> listener = new PlainActionFuture<>(); + client.stream(httpPost, HttpClientContext.create(), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failure")); + } + } + + public void testStream_CancelledCallsOnFailure() throws Exception { + var asyncClient = mock(CloseableHttpAsyncClient.class); + + doAnswer(invocation -> { + FutureCallback listener = 
invocation.getArgument(3); + listener.cancelled(); + return mock(Future.class); + }).when(asyncClient).execute(any(HttpAsyncRequestProducer.class), any(), any(), any()); + + var httpPost = createHttpPost(webServer.getPort(), "a", "b"); + + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, threadPool, mockThrottlerManager())) { + client.start(); + + PlainActionFuture> listener = new PlainActionFuture<>(); + client.stream(httpPost, HttpClientContext.create(), listener); + + var thrownException = expectThrows(CancellationException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is(Strings.format("Request from inference entity id [%s] was cancelled", httpPost.inferenceEntityId())) + ); + } + } + @SuppressWarnings("unchecked") public void testStart_MultipleCallsOnlyStartTheClientOnce() throws Exception { var asyncClient = mock(CloseableHttpAsyncClient.class); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunnerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunnerTests.java new file mode 100644 index 0000000000000..d24bdbe444f52 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/RequestBasedTaskRunnerTests.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class RequestBasedTaskRunnerTests extends ESTestCase { + private ThreadPool threadPool; + + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = spy(createThreadPool(inferenceUtilityPool())); + } + + @After + public void tearDown() throws Exception { + terminate(threadPool); + super.tearDown(); + } + + public void testLoopOneAtATime() throws Exception { + // count the number of times the runnable is called + var counter = new AtomicInteger(0); + + // block the runnable and wait for the test thread to take an action + var lock = new ReentrantLock(); + var condition = lock.newCondition(); + Runnable block = () -> { + try { + try { + lock.lock(); + condition.await(); + } finally { + lock.unlock(); + } + } catch (InterruptedException e) { + fail(e, "did not unblock the thread in time, likely during threadpool terminate"); + } + }; + Runnable unblock = () -> { + try { + lock.lock(); + condition.signalAll(); + } finally { + lock.unlock(); + } + }; + + var runner = new RequestBasedTaskRunner(() -> { + counter.incrementAndGet(); + block.run(); + }, threadPool, UTILITY_THREAD_POOL_NAME); + + // given we have not called requestNextRun, then no thread should have started + assertThat(counter.get(), equalTo(0)); + verify(threadPool, 
times(0)).executor(UTILITY_THREAD_POOL_NAME); + + runner.requestNextRun(); + + // given that we have called requestNextRun, then 1 thread should run once + assertBusy(() -> { + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertThat(counter.get(), equalTo(1)); + }); + + // given that we have called requestNextRun while a thread was running, and the thread was blocked + runner.requestNextRun(); + // then 1 thread should run once + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertThat(counter.get(), equalTo(1)); + + // given the thread is unblocked + unblock.run(); + // then 1 thread should run twice + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertBusy(() -> assertThat(counter.get(), equalTo(2))); + + // given the thread is unblocked again, but there were only two calls to requestNextRun + unblock.run(); + // then 1 thread should run twice + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertBusy(() -> assertThat(counter.get(), equalTo(2))); + + // given no thread is running, when we call requestNextRun + runner.requestNextRun(); + // then a second thread should start for the third run + assertBusy(() -> { + verify(threadPool, times(2)).executor(UTILITY_THREAD_POOL_NAME); + assertThat(counter.get(), equalTo(3)); + }); + + // given the thread is unblocked, then it should exit and rejoin the threadpool + unblock.run(); + assertTrue("Test thread should unblock after all runs complete", terminate(threadPool)); + + // final check - we ran three times on two threads + verify(threadPool, times(2)).executor(UTILITY_THREAD_POOL_NAME); + assertThat(counter.get(), equalTo(3)); + } + + public void testCancel() throws Exception { + // count the number of times the runnable is called + var counter = new AtomicInteger(0); + var latch = new CountDownLatch(1); + var runner = new RequestBasedTaskRunner(() -> { + counter.incrementAndGet(); + try { + latch.await(); + } catch 
(InterruptedException e) { + fail(e, "did not unblock the thread in time, likely during threadpool terminate"); + } + }, threadPool, UTILITY_THREAD_POOL_NAME); + + // given that we have called requestNextRun, then 1 thread should run once + runner.requestNextRun(); + assertBusy(() -> { + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertThat(counter.get(), equalTo(1)); + }); + + // given that a thread is running, three more calls will be queued + runner.requestNextRun(); + runner.requestNextRun(); + runner.requestNextRun(); + + // when we cancel the thread, then the thread should immediately exit and rejoin + runner.cancel(); + latch.countDown(); + assertTrue("Test thread should unblock after all runs complete", terminate(threadPool)); + + // given that we called cancel, when we call requestNextRun then no thread should start + runner.requestNextRun(); + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertThat(counter.get(), equalTo(1)); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisherTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisherTests.java new file mode 100644 index 0000000000000..92a332fe545e3 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisherTests.java @@ -0,0 +1,632 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpResponse; +import org.apache.http.nio.ContentDecoder; +import org.apache.http.nio.IOControl; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Flow; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class StreamingHttpResultPublisherTests extends ESTestCase { + private static final byte[] message = "hello".getBytes(StandardCharsets.UTF_8); + private static final long maxBytes = message.length; + private ThreadPool threadPool; + private HttpSettings settings; + private 
ActionListener> listener; + private StreamingHttpResultPublisher publisher; + + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = mock(ThreadPool.class); + settings = mock(HttpSettings.class); + listener = ActionListener.noop(); + + when(threadPool.executor(UTILITY_THREAD_POOL_NAME)).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + when(settings.getMaxResponseSize()).thenReturn(ByteSizeValue.ofBytes(maxBytes)); + + publisher = new StreamingHttpResultPublisher(threadPool, settings, listener); + } + + /** + * When we receive an http response + * Then we call the listener + * And we queue the initial payload + */ + public void testFirstResponseCallsListener() throws IOException { + var latch = new CountDownLatch(1); + var listener = ActionListener.>wrap( + r -> latch.countDown(), + e -> fail("Listener onFailure should never be called.") + ); + publisher = new StreamingHttpResultPublisher(threadPool, settings, listener); + + publisher.responseReceived(mock(HttpResponse.class)); + + assertThat("Listener's onResponse should be called when we receive a response", latch.getCount(), equalTo(0L)); + } + + /** + * This test combines 4 test since it's easier to verify the exchange of data at once. 
+ * + * Given that the subscriber has not requested data + * When we receive an http response + * Then the publisher enqueues data + * + * Given that the initial http response is queued + * When the subscriber requests data + * Then the subscriber immediately pulls from the queue + * + * Given that the queue is empty + * When the subscriber requests data + * Then no data is sent + * + * Given that the subscriber has requested data + * When the publisher enqueues data + * Then the publisher immediately sends that data + */ + public void testSubscriberAndPublisherExchange() throws IOException { + var subscriber = new TestSubscriber(); + publisher.responseReceived(mock(HttpResponse.class)); + + // subscribe + publisher.subscribe(subscriber); + assertThat("subscribe must call onSubscribe", subscriber.subscription, notNullValue()); + assertThat("onNext should only be called once we have requested data", subscriber.httpResult, nullValue()); + + // request the initial http response + subscriber.requestData(); + assertThat("onNext was called with the initial HttpResponse", subscriber.httpResult, notNullValue()); + assertTrue("HttpResponse has an empty body (because there is no HttpEntity)", subscriber.httpResult.isBodyEmpty()); + subscriber.httpResult = null; // reset test + + // subscriber requests data, publisher has not sent data yet + subscriber.requestData(); + assertThat("onNext should only be called once we have data to process", subscriber.httpResult, nullValue()); + + // publisher sends data + publisher.consumeContent(contentDecoder(message), mock(IOControl.class)); + assertThat("onNext was called with " + new String(message, StandardCharsets.UTF_8), subscriber.httpResult.body(), equalTo(message)); + } + + /** + * When Apache sends a non-200 HttpResponse + * Then we enqueue the only HttpResult and close the stream + */ + public void testNon200Response() throws IOException { + var subscriber = new TestSubscriber(); + // Apache sends a single response and closes the 
consumer + publisher.responseReceived(mock(HttpResponse.class)); + publisher.close(); + + // subscriber requests data + publisher.subscribe(subscriber); + assertThat("subscribe must call onSubscribe", subscriber.subscription, notNullValue()); + subscriber.requestData(); + assertThat("onNext was called with the initial HttpResponse", subscriber.httpResult, notNullValue()); + assertTrue("HttpResponse has an empty body (because there is no HttpEntity)", subscriber.httpResult.isBodyEmpty()); + subscriber.requestData(); + assertTrue("Publisher has been closed", publisher.isDone()); + assertTrue("Subscriber has been completed", subscriber.completed); + } + + /** + * When we load too many bytes into memory + * Then we pause the Apache IO stream + */ + public void testPauseApache() throws IOException { + var ioControl = mock(IOControl.class); + publisher.responseReceived(mock(HttpResponse.class)); + when(settings.getMaxResponseSize()).thenReturn(ByteSizeValue.ofBytes(maxBytes - 1)); + + publisher.consumeContent(contentDecoder(message), ioControl); + + verify(ioControl).suspendInput(); + } + + /** + * When we empty the bytes from memory + * Then we resume the Apache IO stream + */ + public void testResumeApache() throws IOException { + var subscriber = new TestSubscriber(); + publisher.responseReceived(mock(HttpResponse.class)); + publisher.subscribe(subscriber); + subscriber.requestData(); + subscriber.httpResult = null; + + var ioControl = mock(IOControl.class); + when(settings.getMaxResponseSize()).thenReturn(ByteSizeValue.ofBytes(maxBytes - 1)); + publisher.consumeContent(contentDecoder(message), ioControl); + verify(ioControl).suspendInput(); + + subscriber.requestData(); + verify(ioControl).requestInput(); + } + + /** + * When the publisher sends data to the subscriber + * Then we should decrement the current number of bytes in the queue + */ + public void testTotalBytesDecrement() throws IOException { + var longMessage = "message".getBytes(StandardCharsets.UTF_8); + 
var shortMessage = "a ".getBytes(StandardCharsets.UTF_8); + when(settings.getMaxResponseSize()).thenReturn(ByteSizeValue.ofBytes(longMessage.length)); + + var subscriber = new TestSubscriber(); + publisher.responseReceived(mock(HttpResponse.class)); + publisher.subscribe(subscriber); + subscriber.requestData(); + subscriber.httpResult = null; + + var ioControl = mock(IOControl.class); + publisher.consumeContent(contentDecoder(shortMessage), ioControl); + verify(ioControl, times(0)).suspendInput(); + publisher.consumeContent(contentDecoder(longMessage), ioControl); + // consumeContent should check that bytesInQueue == consumedBytes and pause + verify(ioControl, times(1)).suspendInput(); + + // requesting data should reduce bytesInQueue but not resume yet because we haven't reduced totalbytes enough + subscriber.requestData(); + verify(ioControl, times(0)).requestInput(); + // now it should unpause + subscriber.requestData(); + verify(ioControl, times(1)).requestInput(); + + // bytesInQueue should be 0, so increase maxResponseSize and verify we don't pause when we consume the same number of bytes + when(settings.getMaxResponseSize()).thenReturn(ByteSizeValue.ofBytes(longMessage.length + 1)); + publisher.consumeContent(contentDecoder(longMessage), ioControl); + verifyNoMoreInteractions(ioControl); + } + + /** + * Given an error from Apache + * When the subscriber requests the next set of data + * Then the subscriber receives the error from Apache + */ + public void testErrorBeforeRequest() { + var subscriber = subscribe(); + var exception = new NullPointerException("test"); + + publisher.failed(exception); + assertThat("subscriber receives exception on next request", subscriber.throwable, nullValue()); + + subscriber.requestData(); + assertThat("subscriber receives exception", subscriber.throwable, is(exception)); + } + + /** + * Given the subscriber is waiting for data + * When Apache sends an error + * Then the subscriber immediately receives the error + */ + public 
void testErrorAfterRequest() { + var subscriber = subscribe(); + var exception = new NullPointerException("test"); + + subscriber.requestData(); + publisher.failed(exception); + assertThat("subscriber receives exception", subscriber.throwable, is(exception)); + } + + /** + * Given the queue is being processed + * When Apache sends an error before the subscriber asks for more data + * Then the error will be handled the next time the subscriber requests data + */ + public void testErrorWhileRunningBeforeRequest() throws IOException { + var exception = new NullPointerException("test"); + var subscriber = runBefore(() -> publisher.failed(exception)); + + subscriber.requestData(); + assertThat("subscriber receives exception on next request", subscriber.throwable, nullValue()); + + subscriber.requestData(); + assertThat("subscriber receives exception", subscriber.throwable, is(exception)); + } + + /** + * Given the queue is being processed + * When Apache sends an error after the subscriber asks for more data + * Then the error will be forwarded by the queue processor thread + */ + public void testErrorWhileRunningAfterRequest() throws IOException { + var exception = new NullPointerException("test"); + var subscriber = runAfter(() -> publisher.failed(exception)); + + subscriber.requestData(); + assertThat("subscriber receives exception", subscriber.throwable, is(exception)); + } + + /** + * Given Apache closed response processing + * When the subscriber requests more data + * Then the subscriber is marked as completed + */ + public void testCloseBeforeRequest() { + var subscriber = subscribe(); + + publisher.close(); + assertFalse("onComplete should not be called until the subscriber requests it", subscriber.completed); + + subscriber.requestData(); + assertTrue("onComplete should now be called", subscriber.completed); + } + + /** + * Given the subscriber is waiting for more data + * When Apache closes response processing + * Then the subscriber is marked as completed + 
*/ + public void testCloseAfterRequest() { + var subscriber = subscribe(); + + subscriber.requestData(); + publisher.close(); + assertTrue("onComplete should be called", subscriber.completed); + } + + /** + * Given the queue is being processed + * When Apache closes the publisher + * Then the close will be handled the next time the subscriber requests data + */ + public void testCloseWhileRunningBeforeRequest() throws IOException { + var subscriber = runBefore(publisher::close); + + subscriber.requestData(); + assertFalse("onComplete should not be called until the subscriber requests it", subscriber.completed); + + subscriber.requestData(); + assertTrue("onComplete should now be called", subscriber.completed); + } + + /** + * Given the queue is being processed + * When Apache closes the publisher after the subscriber asks for more data + * Then the close will be handled by the queue processor thread + */ + public void testCloseWhileRunningAfterRequest() throws IOException { + var subscriber = runAfter(publisher::close); + subscriber.requestData(); + assertTrue("onComplete should now be called", subscriber.completed); + } + + /** + * Given Apache cancels response processing + * When the subscriber requests more data + * Then the subscriber is marked as completed + */ + public void testCancelBeforeRequest() { + var subscriber = subscribe(); + + publisher.cancel(); + assertFalse("onComplete should not be called until the subscriber requests it", subscriber.completed); + + subscriber.requestData(); + assertTrue("onComplete should now be called", subscriber.completed); + } + + /** + * Given the subscriber is waiting for more data + * When Apache cancels response processing + * Then the subscriber is marked as completed + */ + public void testCancelAfterRequest() { + var subscriber = subscribe(); + + subscriber.requestData(); + publisher.cancel(); + assertTrue("onComplete should be called", subscriber.completed); + } + + /** + * Given the queue is being processed + * 
When Apache cancels the publisher + * Then the cancel will be handled the next time the subscriber requests data + */ + public void testApacheCancelWhileRunningBeforeRequest() throws IOException { + TestSubscriber subscriber = runBefore(publisher::cancel); + + subscriber.requestData(); + assertFalse("onComplete should not be called until the subscriber requests it", subscriber.completed); + + subscriber.requestData(); + assertTrue("onComplete should now be called", subscriber.completed); + } + + /** + * Given the queue is being processed + * When Apache cancels the publisher after the subscriber asks for more data + * Then the cancel will be handled by the queue processor thread + */ + public void testApacheCancelWhileRunningAfterRequest() throws IOException { + TestSubscriber subscriber = runAfter(publisher::cancel); + + subscriber.requestData(); + assertTrue("onComplete should now be called", subscriber.completed); + } + + /** + * When a subscriber requests a negative number + * Then the subscription should call onError with an IllegalArgumentException + */ + public void testRequestingANegativeNumberFails() { + TestSubscriber subscriber = subscribe(); + + subscriber.subscription.request(-1); + + assertThat( + "onError should be called with an IllegalArgumentException", + subscriber.throwable, + instanceOf(IllegalArgumentException.class) + ); + } + + /** + * When a subscriber requests a negative number + * Then the subscription should call onError with an IllegalArgumentException + */ + public void testRequestingZeroFails() { + TestSubscriber subscriber = subscribe(); + + subscriber.subscription.request(0); + + assertThat( + "onError should be called with an IllegalArgumentException", + subscriber.throwable, + instanceOf(IllegalArgumentException.class) + ); + } + + /** + * Given a subscriber is already subscribed + * When a second subscriber subscribes + * Then that subscriber should receive an IllegalStateException + */ + public void testDoubleSubscribeFails() { 
+ publisher.subscribe(mock()); + + var subscriber = new TestSubscriber(); + publisher.subscribe(subscriber); + assertThat(subscriber.throwable, notNullValue()); + assertThat(subscriber.throwable, instanceOf(IllegalStateException.class)); + } + + /** + * Given the thread is an ML Utility thread + * When a new request is processed + * Then it should reuse that ML Utility thread + */ + public void testReuseMlThread() throws IOException, ExecutionException, InterruptedException, TimeoutException { + try { + threadPool = spy(createThreadPool(inferenceUtilityPool())); + publisher = new StreamingHttpResultPublisher(threadPool, settings, listener); + var subscriber = new TestSubscriber(); + publisher.responseReceived(mock(HttpResponse.class)); + publisher.subscribe(subscriber); + + CompletableFuture.runAsync(subscriber::requestData, threadPool.executor(UTILITY_THREAD_POOL_NAME)).get(5, TimeUnit.SECONDS); + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + assertThat("onNext was called with the initial HttpResponse", subscriber.httpResult, notNullValue()); + assertTrue("HttpResponse has an empty body (because there is no HttpEntity)", subscriber.httpResult.isBodyEmpty()); + } finally { + terminate(threadPool); + } + } + + /** + * Given that content is still streaming + * When a user cancels the Subscription + * Then the background thread should stop processing data + */ + public void testCancelBreaksInfiniteLoop() throws Exception { + try { + var futureHolder = new AtomicReference>(); + threadPool = spy(createThreadPool(inferenceUtilityPool())); + doAnswer(utilityThreadPool -> { + var realExecutorService = (ExecutorService) utilityThreadPool.callRealMethod(); + var executorServiceSpy = spy(realExecutorService); + doAnswer(runnable -> { + futureHolder.set(CompletableFuture.runAsync(runnable.getArgument(0), realExecutorService)); + return null; // void + }).when(executorServiceSpy).execute(any(Runnable.class)); + return executorServiceSpy; + 
}).when(threadPool).executor(UTILITY_THREAD_POOL_NAME); + + publisher = new StreamingHttpResultPublisher(threadPool, settings, listener); + publisher.responseReceived(mock(HttpResponse.class)); + // create an infinitely running Subscriber + var subscriber = new Flow.Subscriber() { + Flow.Subscription subscription; + boolean completed = false; + + @Override + public void onSubscribe(Flow.Subscription subscription) { + this.subscription = subscription; + subscription.request(1); + } + + @Override + public void onNext(HttpResult item) { + try { + publisher.consumeContent(contentDecoder(message), mock(IOControl.class)); + } catch (IOException e) { + fail(e, "Failed to publish content for testCancelBreaksInfiniteLoop."); + } + subscription.request(1); // run infinitely + } + + @Override + public void onError(Throwable throwable) { + fail(throwable, "onError should never be called"); + } + + @Override + public void onComplete() { + completed = true; + } + }; + publisher.subscribe(subscriber); + + // verify the thread has started + assertThat("Thread should have started on subscribe", futureHolder.get(), notNullValue()); + assertFalse("Thread should still be running", futureHolder.get().isDone()); + + subscriber.subscription.cancel(); + assertBusy(() -> assertTrue("Thread was not canceled in 10 seconds.", futureHolder.get().isDone())); + } finally { + terminate(threadPool); + } + } + + /** + * Given the message queue is currently being processed + * When a new message is added to the queue + * Then a new processor thread is not started to process that message + */ + public void testOnlyRunOneAtATime() throws IOException { + // start with a message published + publisher.responseReceived(mock(HttpResponse.class)); + TestSubscriber subscriber = new TestSubscriber() { + public void onNext(HttpResult item) { + try { + // publish a second message + publisher.consumeContent(contentDecoder(message), mock(IOControl.class)); + super.requestData(); + // and then exit out of the loop 
+ publisher.cancel(); + } catch (IOException e) { + throw new RuntimeException(e); + } + super.onNext(item); + } + }; + publisher.subscribe(subscriber); + + verify(threadPool, times(0)).executor(UTILITY_THREAD_POOL_NAME); + subscriber.requestData(); + verify(threadPool, times(1)).executor(UTILITY_THREAD_POOL_NAME); + } + + private static ContentDecoder contentDecoder(byte[] message) { + return new ContentDecoder() { + boolean sendBytes = true; + + @Override + public int read(ByteBuffer byteBuffer) { + if (sendBytes) { + sendBytes = false; + byteBuffer.put(message); + return message.length; + } + return 0; + } + + @Override + public boolean isCompleted() { + return true; + } + }; + } + + private TestSubscriber subscribe() { + var subscriber = new TestSubscriber(); + publisher.subscribe(subscriber); + return subscriber; + } + + private TestSubscriber runBefore(Runnable runDuringOnNext) throws IOException { + publisher.responseReceived(mock(HttpResponse.class)); + TestSubscriber subscriber = new TestSubscriber() { + public void onNext(HttpResult item) { + runDuringOnNext.run(); + super.onNext(item); + } + }; + publisher.subscribe(subscriber); + return subscriber; + } + + private TestSubscriber runAfter(Runnable runDuringOnNext) throws IOException { + publisher.responseReceived(mock(HttpResponse.class)); + TestSubscriber subscriber = new TestSubscriber() { + public void onNext(HttpResult item) { + runDuringOnNext.run(); + super.requestData(); + super.onNext(item); + } + }; + publisher.subscribe(subscriber); + return subscriber; + } + + private static class TestSubscriber implements Flow.Subscriber { + private Flow.Subscription subscription; + private HttpResult httpResult; + private Throwable throwable; + private boolean completed; + + @Override + public void onSubscribe(Flow.Subscription subscription) { + this.subscription = subscription; + } + + @Override + public void onNext(HttpResult item) { + this.httpResult = item; + } + + @Override + public void 
onError(Throwable throwable) { + this.throwable = throwable; + } + + @Override + public void onComplete() { + this.completed = true; + } + + private void requestData() { + subscription.request(1); + } + } +} From 87e257e8739f64d61f13ed01f0c9e72dad65c5d9 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 3 Sep 2024 12:17:41 -0400 Subject: [PATCH 014/115] [ML] Register Task while Streaming (#112369) While Inference is streaming a response, register a Task in the TaskManager to represent the stream. When the Stream finishes, either through completion or error, we will unregister the Task. If a user cancels the Task, it should close out the stream. --- docs/changelog/112369.yaml | 5 + .../inference/action/InferenceAction.java | 24 ++- .../inference/src/main/java/module-info.java | 1 + .../InferenceNamedWriteablesProvider.java | 3 + .../action/TransportInferenceAction.java | 26 ++- .../xpack/inference/action/task/FlowTask.java | 64 ++++++ .../action/task/StreamingTaskManager.java | 198 ++++++++++++++++++ .../task/StreamingTaskManagerTests.java | 192 +++++++++++++++++ 8 files changed, 508 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/112369.yaml create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/FlowTask.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManager.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManagerTests.java diff --git a/docs/changelog/112369.yaml b/docs/changelog/112369.yaml new file mode 100644 index 0000000000000..fb1c4775f7a12 --- /dev/null +++ b/docs/changelog/112369.yaml @@ -0,0 +1,5 @@ +pr: 112369 +summary: Register Task while Streaming +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index c38f508db1b6a..d898f961651f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java @@ -164,6 +164,10 @@ public TimeValue getInferenceTimeout() { return inferenceTimeout; } + public boolean isStreaming() { + return false; + } + @Override public ActionRequestValidationException validate() { if (input == null) { @@ -326,9 +330,19 @@ public String toString() { public static class Response extends ActionResponse implements ChunkedToXContentObject { private final InferenceServiceResults results; + private final boolean isStreaming; + private final Flow.Publisher publisher; public Response(InferenceServiceResults results) { this.results = results; + this.isStreaming = false; + this.publisher = null; + } + + public Response(InferenceServiceResults results, Flow.Publisher publisher) { + this.results = results; + this.isStreaming = true; + this.publisher = publisher; } public Response(StreamInput in) throws IOException { @@ -340,6 +354,9 @@ public Response(StreamInput in) throws IOException { // hugging face elser and elser results = transformToServiceResults(List.of(in.readNamedWriteable(InferenceResults.class))); } + // streaming isn't supported via Writeable yet + this.isStreaming = false; + this.publisher = null; } @SuppressWarnings("deprecation") @@ -398,7 +415,7 @@ public InferenceServiceResults getResults() { * Currently set to false while it is being implemented. */ public boolean isStreaming() { - return false; + return isStreaming; } /** @@ -407,8 +424,8 @@ public boolean isStreaming() { * If the RestResponse is closed, it will cancel the subscription. 
*/ public Flow.Publisher publisher() { - assert isStreaming() == false : "This must be implemented when isStreaming() == true"; - throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + assert isStreaming() : "this should only be called after isStreaming() verifies this object is non-null"; + return publisher; } @Override @@ -418,6 +435,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeNamedWriteable(results.transformToLegacyFormat().get(0)); } + // streaming isn't supported via Writeable yet } @Override diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index a7e5718a0920e..49abe14dbf302 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -33,6 +33,7 @@ exports org.elasticsearch.xpack.inference.rest; exports org.elasticsearch.xpack.inference.services; exports org.elasticsearch.xpack.inference; + exports org.elasticsearch.xpack.inference.action.task; provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.inference.InferenceFeatures; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index d4810ba930b44..62c2c5fd61992 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.inference.results.LegacyTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import 
org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.embeddings.AlibabaCloudSearchEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.embeddings.AlibabaCloudSearchEmbeddingsTaskSettings; @@ -126,6 +127,8 @@ public static List getNamedWriteables() { addEisNamedWriteables(namedWriteables); addAlibabaCloudSearchNamedWriteables(namedWriteables); + namedWriteables.addAll(StreamingTaskManager.namedWriteables()); + return namedWriteables; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java index 81669a573a5d1..054cfe1017cad 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java @@ -12,22 +12,28 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.Model; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; import 
org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.telemetry.InferenceStats; public class TransportInferenceAction extends HandledTransportAction { + private static final String STREAMING_INFERENCE_TASK_TYPE = "streaming_inference"; + private static final String STREAMING_TASK_ACTION = "xpack/inference/streaming_inference[n]"; private final ModelRegistry modelRegistry; private final InferenceServiceRegistry serviceRegistry; private final InferenceStats inferenceStats; + private final StreamingTaskManager streamingTaskManager; @Inject public TransportInferenceAction( @@ -35,12 +41,14 @@ public TransportInferenceAction( ActionFilters actionFilters, ModelRegistry modelRegistry, InferenceServiceRegistry serviceRegistry, - InferenceStats inferenceStats + InferenceStats inferenceStats, + StreamingTaskManager streamingTaskManager ) { super(InferenceAction.NAME, transportService, actionFilters, InferenceAction.Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.modelRegistry = modelRegistry; this.serviceRegistry = serviceRegistry; this.inferenceStats = inferenceStats; + this.streamingTaskManager = streamingTaskManager; } @Override @@ -100,7 +108,21 @@ private void inferOnService( request.getTaskSettings(), request.getInputType(), request.getInferenceTimeout(), - listener.delegateFailureAndWrap((l, inferenceResults) -> l.onResponse(new InferenceAction.Response(inferenceResults))) + createListener(request, listener) ); } + + private ActionListener createListener( + InferenceAction.Request request, + ActionListener listener + ) { + if (request.isStreaming()) { + return listener.delegateFailureAndWrap((l, inferenceResults) -> { + var taskProcessor = streamingTaskManager.create(STREAMING_INFERENCE_TASK_TYPE, STREAMING_TASK_ACTION); + inferenceResults.publisher().subscribe(taskProcessor); + l.onResponse(new InferenceAction.Response(inferenceResults, taskProcessor)); + }); + } + return listener.delegateFailureAndWrap((l, 
inferenceResults) -> l.onResponse(new InferenceAction.Response(inferenceResults))); + }; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/FlowTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/FlowTask.java new file mode 100644 index 0000000000000..b565f7adc36fe --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/FlowTask.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.action.task; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +class FlowTask extends CancellableTask { + private final AtomicReference flowStatus = new AtomicReference<>(FlowStatus.CONNECTING); + + FlowTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + super(id, type, action, description, parentTaskId, headers); + } + + @Override + public Status getStatus() { + return flowStatus.get(); + } + + public void updateStatus(FlowStatus status) { + flowStatus.set(status); + } + + enum FlowStatus implements Task.Status { + CONNECTING("Connecting"), + CONNECTED("Connected"); + + static final String NAME = "streaming_task_manager_flow_status"; + static final Reader STREAM_READER = in -> FlowStatus.valueOf(in.readString()); + + private final String status; + + FlowStatus(String status) { + this.status = status; + } + + @Override + public String 
getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(status); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field("status", status).endObject(); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManager.java new file mode 100644 index 0000000000000..8aa437c773608 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManager.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.action.task; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskAwareRequest; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.Flow; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +/** + * A wrapper around the {@link TaskManager} that creates a new {@link Task} to observe and control a {@link Flow}. When the Flow is + * initiated, a new Task is registered in the TaskManager that represents the Flow. 
When the Flow ends, either through completion or error, + * the Task will close in the TaskManager. If a user manually closes the Task, it will cancel and close the underlying Flow. + */ +public class StreamingTaskManager { + private final TaskManager taskManager; + private final ThreadPool threadPool; + + @Inject + public StreamingTaskManager(TransportService transportService, ThreadPool threadPool) { + this(transportService.getTaskManager(), threadPool); + } + + StreamingTaskManager(TaskManager taskManager, ThreadPool threadPool) { + this.taskManager = taskManager; + this.threadPool = threadPool; + } + + public Flow.Processor create(String taskType, String taskAction) { + return new TaskBackedProcessor<>(taskType, taskAction); + } + + public static List namedWriteables() { + return List.of(new NamedWriteableRegistry.Entry(Task.Status.class, FlowTask.FlowStatus.NAME, FlowTask.FlowStatus.STREAM_READER)); + } + + private class TaskBackedProcessor implements Flow.Processor { + private static final Logger log = LogManager.getLogger(TaskBackedProcessor.class); + private final String taskType; + private final String taskAction; + private Flow.Subscriber downstream; + private Flow.Subscription upstream; + private FlowTask task; + private final AtomicBoolean isClosed = new AtomicBoolean(false); + private final AtomicLong pendingRequests = new AtomicLong(); + + private TaskBackedProcessor(String taskType, String taskAction) { + this.taskType = taskType; + this.taskAction = taskAction; + } + + @Override + public void subscribe(Flow.Subscriber subscriber) { + if (downstream != null) { + subscriber.onError(new IllegalStateException("Another subscriber is already subscribed.")); + return; + } + + downstream = subscriber; + openOrUpdateTask(); + downstream.onSubscribe(forwardingSubscription()); + } + + private void openOrUpdateTask() { + if (task != null) { + task.updateStatus(FlowTask.FlowStatus.CONNECTED); + } else { + try (var ignored = 
threadPool.getThreadContext().newTraceContext()) { + task = (FlowTask) taskManager.register(taskType, taskAction, new TaskAwareRequest() { + @Override + public void setParentTask(TaskId taskId) { + throw new UnsupportedOperationException("parent task id for streaming results shouldn't change"); + } + + @Override + public void setRequestId(long requestId) { + throw new UnsupportedOperationException("does not have request ID"); + } + + @Override + public TaskId getParentTask() { + return TaskId.EMPTY_TASK_ID; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + var flowTask = new FlowTask(id, type, action, "", parentTaskId, headers); + flowTask.addListener(TaskBackedProcessor.this::cancelTask); + return flowTask; + } + }); + } + } + } + + private void cancelTask() { + if (isClosed.compareAndSet(false, true)) { + if (upstream != null) { + upstream.cancel(); + } + if (downstream != null) { + downstream.onComplete(); + } + } + } + + private Flow.Subscription forwardingSubscription() { + return new Flow.Subscription() { + @Override + public void request(long n) { + if (isClosed.get()) { + downstream.onComplete(); // shouldn't happen, but reinforce that we're no longer listening + } else if (upstream != null) { + upstream.request(n); + } else { + pendingRequests.accumulateAndGet(n, Long::sum); + } + } + + @Override + public void cancel() { + finishTask(); + if (upstream != null) { + upstream.cancel(); + } + } + }; + } + + @Override + public void onSubscribe(Flow.Subscription subscription) { + if (isClosed.get()) { + subscription.cancel(); + return; + } + + upstream = subscription; + openOrUpdateTask(); + var currentRequestCount = pendingRequests.getAndSet(0); + if (currentRequestCount != 0) { + upstream.request(currentRequestCount); + } + } + + @Override + public void onNext(E item) { + if (isClosed.get()) { + upstream.cancel(); // shouldn't happen, but reinforce that we're no longer listening + } else { + 
downstream.onNext(item); + } + } + + @Override + public void onError(Throwable throwable) { + finishTask(); + if (downstream != null) { + downstream.onError(throwable); + } else { + log.atDebug() + .withThrowable(throwable) + .log("onError was called before the downstream subscription, rethrowing to close listener."); + throw new IllegalStateException("onError was called before the downstream subscription", throwable); + } + } + + @Override + public void onComplete() { + finishTask(); + if (downstream != null) { + downstream.onComplete(); + } + } + + private void finishTask() { + if (isClosed.compareAndSet(false, true) && task != null) { + taskManager.unregister(task); + } + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManagerTests.java new file mode 100644 index 0000000000000..8ca4a5f2aa309 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/task/StreamingTaskManagerTests.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.action.task; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.TaskAwareRequest; +import org.elasticsearch.tasks.TaskCancelHelper; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Map; +import java.util.concurrent.Flow; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.only; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class StreamingTaskManagerTests extends ESTestCase { + private static final String taskType = "taskType"; + private static final String taskAction = "taskAction"; + + private TaskManager taskManager; + private StreamingTaskManager streamingTaskManager; + + @Before + public void setUp() throws Exception { + super.setUp(); + taskManager = mock(); + ThreadPool threadPool = mock(); + streamingTaskManager = new StreamingTaskManager(taskManager, threadPool); + + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + doAnswer(ans -> { + TaskAwareRequest taskAwareRequest = ans.getArgument(2); + return taskAwareRequest.createTask(1L, taskType, taskAction, TaskId.EMPTY_TASK_ID, Map.of()); + }).when(taskManager).register(any(), any(), any()); + } + + @After 
+ public void tearDown() throws Exception { + super.tearDown(); + } + + public void testSubscribeRegistersTask() { + var processor = streamingTaskManager.create(taskType, taskAction); + + processor.subscribe(mock()); + + verify(taskManager, only()).register(eq(taskType), eq(taskAction), any()); + } + + public void testCancelPropagatesUpstreamAndDownstream() { + var task = new AtomicReference(); + doAnswer(ans -> { + TaskAwareRequest taskAwareRequest = ans.getArgument(2); + var registeredTask = (CancellableTask) taskAwareRequest.createTask(1L, taskType, taskAction, TaskId.EMPTY_TASK_ID, Map.of()); + task.set(registeredTask); + return registeredTask; + }).when(taskManager).register(any(), any(), any()); + + Flow.Subscriber downstream = mock(); + Flow.Subscription upstream = mock(); + + var processor = streamingTaskManager.create(taskType, taskAction); + processor.subscribe(downstream); + processor.onSubscribe(upstream); + + TaskCancelHelper.cancel(task.get(), "for testing"); + + verify(downstream, times(1)).onComplete(); + verify(upstream, times(1)).cancel(); + } + + public void testRequestBeforeOnSubscribe() { + var processor = streamingTaskManager.create(taskType, taskAction); + var expectedRequestCount = randomLongBetween(2, 100); + + Flow.Subscriber downstream = mock(); + processor.subscribe(downstream); + + var subscription = ArgumentCaptor.forClass(Flow.Subscription.class); + verify(downstream, times(1)).onSubscribe(subscription.capture()); + subscription.getValue().request(expectedRequestCount); + + Flow.Subscription upstream = mock(); + processor.onSubscribe(upstream); + verify(upstream, times(1)).request(eq(expectedRequestCount)); + } + + public void testRequestAfterOnSubscribe() { + var processor = streamingTaskManager.create(taskType, taskAction); + var expectedRequestCount = randomLongBetween(2, 100); + + Flow.Subscription upstream = mock(); + processor.onSubscribe(upstream); + verify(upstream, never()).request(anyInt()); + + Flow.Subscriber downstream = 
mock(); + processor.subscribe(downstream); + + var subscription = ArgumentCaptor.forClass(Flow.Subscription.class); + verify(downstream, times(1)).onSubscribe(subscription.capture()); + + subscription.getValue().request(expectedRequestCount); + verify(upstream, times(1)).request(eq(expectedRequestCount)); + } + + public void testOnErrorUnregistersTask() { + var expectedError = new IllegalStateException("blah"); + var processor = streamingTaskManager.create(taskType, taskAction); + var downstream = establishFlow(processor); + + processor.onError(expectedError); + + verify(downstream, times(1)).onError(expectedError); + verify(taskManager, times(1)).unregister(any()); + } + + private Flow.Subscriber establishFlow(Flow.Processor processor) { + Flow.Subscription upstream = mock(); + processor.onSubscribe(upstream); + Flow.Subscriber downstream = mock(); + processor.subscribe(downstream); + return downstream; + } + + public void testOnCompleteUnregistersTask() { + var processor = streamingTaskManager.create(taskType, taskAction); + var downstream = establishFlow(processor); + + processor.onComplete(); + + verify(downstream, times(1)).onComplete(); + verify(taskManager, times(1)).unregister(any()); + } + + public void testOnNextForwardsItem() { + var expectedItem = new Object(); + var processor = streamingTaskManager.create(taskType, taskAction); + var downstream = establishFlow(processor); + + processor.onNext(expectedItem); + + verify(downstream, times(1)).onNext(same(expectedItem)); + } + + public void testOnNextAfterCancelDoesNotForwardItem() { + var expectedItem = new Object(); + var task = new AtomicReference(); + doAnswer(ans -> { + TaskAwareRequest taskAwareRequest = ans.getArgument(2); + var registeredTask = (CancellableTask) taskAwareRequest.createTask(1L, taskType, taskAction, TaskId.EMPTY_TASK_ID, Map.of()); + task.set(registeredTask); + return registeredTask; + }).when(taskManager).register(any(), any(), any()); + + var processor = 
streamingTaskManager.create(taskType, taskAction); + var downstream = establishFlow(processor); + + TaskCancelHelper.cancel(task.get(), "test"); + processor.onNext(expectedItem); + + verify(downstream, never()).onNext(any()); + } + + public void testCancelBeforeSubscriptionThrowsException() { + var processor = streamingTaskManager.create(taskType, taskAction); + + assertThrows(IllegalStateException.class, () -> processor.onError(new NullPointerException())); + } +} From adfbb3d22c2ad83be5ba629dd32c99de8323ff1a Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 3 Sep 2024 09:19:10 -0700 Subject: [PATCH 015/115] Fix synthetic source tests for shape (#112373) --- ...GeoShapeWithDocValuesFieldMapperTests.java | 2 +- .../GeometricShapeSyntheticSourceSupport.java | 92 +++++++++++-------- .../index/mapper/ShapeFieldMapperTests.java | 2 +- 3 files changed, 54 insertions(+), 42 deletions(-) diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 55fcf1b7d39b4..5999a3ff1e151 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -428,7 +428,7 @@ protected Object generateRandomInputValue(MappedFieldType ft) { @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { - return new GeometricShapeSyntheticSourceSupport(ignoreMalformed); + return new GeometricShapeSyntheticSourceSupport(GeometricShapeSyntheticSourceSupport.FieldType.GEO_SHAPE, ignoreMalformed); } @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeometricShapeSyntheticSourceSupport.java 
b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeometricShapeSyntheticSourceSupport.java index 4325eb41ceefa..e1a91bb592ce7 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeometricShapeSyntheticSourceSupport.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeometricShapeSyntheticSourceSupport.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.mapper.MapperTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.spatial.util.GeoTestUtils; import java.io.IOException; import java.util.List; @@ -32,9 +33,11 @@ * Synthetic source support for fields the index geometry shapes: shape, geo_shape. */ public class GeometricShapeSyntheticSourceSupport implements MapperTestCase.SyntheticSourceSupport { + private final FieldType fieldType; private final boolean ignoreMalformed; - public GeometricShapeSyntheticSourceSupport(boolean ignoreMalformed) { + public GeometricShapeSyntheticSourceSupport(FieldType fieldType, boolean ignoreMalformed) { + this.fieldType = fieldType; this.ignoreMalformed = ignoreMalformed; } @@ -92,48 +95,42 @@ private Value generateValue() { var type = randomFrom(ShapeType.values()); var isGeoJson = randomBoolean(); - return switch (type) { - // LINEARRING and CIRCLE are not supported as inputs to fields so just return points - case POINT, LINEARRING, CIRCLE -> { - var point = GeometryTestUtils.randomPoint(false); - yield value(point, isGeoJson); + while (true) { + Geometry candidateGeometry = switch (type) { + // LINEARRING and CIRCLE are not supported as inputs to fields so just return points + case POINT, LINEARRING, CIRCLE -> GeometryTestUtils.randomPoint(false); + case MULTIPOINT -> GeometryTestUtils.randomMultiPoint(false); + case LINESTRING -> GeometryTestUtils.randomLine(false); + case MULTILINESTRING -> GeometryTestUtils.randomMultiLine(false); 
+ case POLYGON -> GeometryTestUtils.randomPolygon(false); + case MULTIPOLYGON -> GeometryTestUtils.randomMultiPolygon(false); + case GEOMETRYCOLLECTION -> GeometryTestUtils.randomGeometryCollectionWithoutCircle(false); + case ENVELOPE -> GeometryTestUtils.randomRectangle(); + }; + + try { + if (fieldType == FieldType.GEO_SHAPE) { + GeoTestUtils.binaryGeoShapeDocValuesField("f", candidateGeometry); + } else { + GeoTestUtils.binaryCartesianShapeDocValuesField("f", candidateGeometry); + } + + if (type == ShapeType.ENVELOPE) { + var wktString = WellKnownText.toWKT(candidateGeometry); + + return new Value(wktString, wktString); + } + + return value(candidateGeometry, isGeoJson); + } catch (IllegalArgumentException ignored) { + // It's malformed somehow, loop } - case MULTIPOINT -> { - var multiPoint = GeometryTestUtils.randomMultiPoint(false); - yield value(multiPoint, isGeoJson); - } - case LINESTRING -> { - var line = GeometryTestUtils.randomLine(false); - yield value(line, isGeoJson); - } - case MULTILINESTRING -> { - var multiPoint = GeometryTestUtils.randomMultiLine(false); - yield value(multiPoint, isGeoJson); - } - case POLYGON -> { - var polygon = GeometryTestUtils.randomPolygon(false); - yield value(polygon, isGeoJson); - } - case MULTIPOLYGON -> { - var multiPolygon = GeometryTestUtils.randomMultiPolygon(false); - yield value(multiPolygon, isGeoJson); - } - case GEOMETRYCOLLECTION -> { - var multiPolygon = GeometryTestUtils.randomGeometryCollectionWithoutCircle(false); - yield value(multiPolygon, isGeoJson); - } - case ENVELOPE -> { - var rectangle = GeometryTestUtils.randomRectangle(); - var wktString = WellKnownText.toWKT(rectangle); - - yield new Value(wktString, wktString); - } - }; + } } - private static Value value(Geometry geometry, boolean isGeoJson) { + private Value value(Geometry geometry, boolean isGeoJson) { var wktString = WellKnownText.toWKT(geometry); - var normalizedWktString = GeometryNormalizer.needsNormalize(Orientation.RIGHT, geometry) + 
var normalizedWktString = fieldType == FieldType.GEO_SHAPE && GeometryNormalizer.needsNormalize(Orientation.RIGHT, geometry) ? WellKnownText.toWKT(GeometryNormalizer.apply(Orientation.RIGHT, geometry)) : wktString; @@ -146,7 +143,7 @@ private static Value value(Geometry geometry, boolean isGeoJson) { } private void mapping(XContentBuilder b) throws IOException { - b.field("type", "geo_shape"); + b.field("type", fieldType.getName()); if (rarely()) { b.field("index", false); } @@ -162,4 +159,19 @@ private void mapping(XContentBuilder b) throws IOException { public List invalidExample() throws IOException { return List.of(); } + + public enum FieldType { + GEO_SHAPE("geo_shape"), + SHAPE("shape"); + + private final String name; + + FieldType(String name) { + this.name = name; + } + + public String getName() { + return name; + } + } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java index 28297f32297e6..35ccfe8deb5fe 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java @@ -368,7 +368,7 @@ protected Object generateRandomInputValue(MappedFieldType ft) { @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { - return new GeometricShapeSyntheticSourceSupport(ignoreMalformed); + return new GeometricShapeSyntheticSourceSupport(GeometricShapeSyntheticSourceSupport.FieldType.SHAPE, ignoreMalformed); } @Override From c455984820a3d1825c04ecdc4a3246e76e512e5c Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 4 Sep 2024 02:43:12 +1000 Subject: [PATCH 016/115] Mute 
org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests testLoopOneAtATime #112471 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 6f52c67ab5170..45c286375a1ed 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -185,6 +185,9 @@ tests: - class: org.elasticsearch.xpack.esql.action.ManyShardsIT method: testConcurrentQueries issue: https://github.com/elastic/elasticsearch/issues/112424 +- class: org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests + method: testLoopOneAtATime + issue: https://github.com/elastic/elasticsearch/issues/112471 # Examples: # From b0a00561d18867021a67b443e2b7ebbe0ff3d699 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 3 Sep 2024 10:42:29 -0700 Subject: [PATCH 017/115] Add Github action for keeping jdk upgrade branch in sync --- .github/workflows/sync-main-to-jdk-branch.yml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .github/workflows/sync-main-to-jdk-branch.yml diff --git a/.github/workflows/sync-main-to-jdk-branch.yml b/.github/workflows/sync-main-to-jdk-branch.yml new file mode 100644 index 0000000000000..a04db44fe6c54 --- /dev/null +++ b/.github/workflows/sync-main-to-jdk-branch.yml @@ -0,0 +1,20 @@ +# Daily update of JDK update branch with changes from main +name: "Merge main to es-9.0-bump branch" +on: + schedule: + - cron: '0 5 * * *' + +jobs: + merge-branch: + runs-on: ubuntu-latest + steps: + - name: checkout + uses: actions/checkout@master + + - name: merge + uses: devmasx/merge-branch@1.4.0 + with: + type: 'now' + from_branch: main + target_branch: es-9.0-bump + github_token: ${{ secrets.ELASTICSEARCHMACHINE_TOKEN }} From 42ac585e7f92f10ce2271438e640d51efd1bd417 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 3 Sep 2024 10:47:02 -0700 Subject: [PATCH 018/115] Allow for manual invocation --- .github/workflows/sync-main-to-jdk-branch.yml | 1 + 1 file changed, 1 
insertion(+) diff --git a/.github/workflows/sync-main-to-jdk-branch.yml b/.github/workflows/sync-main-to-jdk-branch.yml index a04db44fe6c54..9570ca96a3370 100644 --- a/.github/workflows/sync-main-to-jdk-branch.yml +++ b/.github/workflows/sync-main-to-jdk-branch.yml @@ -3,6 +3,7 @@ name: "Merge main to es-9.0-bump branch" on: schedule: - cron: '0 5 * * *' + workflow_dispatch: {} jobs: merge-branch: From aefdd7680f7887e2bac4297cfadf8a5c3cdc9bd6 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 3 Sep 2024 10:51:44 -0700 Subject: [PATCH 019/115] Fix branch name in github action nworkflow --- .github/workflows/sync-main-to-jdk-branch.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sync-main-to-jdk-branch.yml b/.github/workflows/sync-main-to-jdk-branch.yml index 9570ca96a3370..ff87d5cd71178 100644 --- a/.github/workflows/sync-main-to-jdk-branch.yml +++ b/.github/workflows/sync-main-to-jdk-branch.yml @@ -1,5 +1,5 @@ # Daily update of JDK update branch with changes from main -name: "Merge main to es-9.0-bump branch" +name: "Merge main to openjdk23-bundle branch" on: schedule: - cron: '0 5 * * *' @@ -17,5 +17,5 @@ jobs: with: type: 'now' from_branch: main - target_branch: es-9.0-bump + target_branch: openjdk23-bundle github_token: ${{ secrets.ELASTICSEARCHMACHINE_TOKEN }} From 699194844ec2d44a45a6e932c47eacb4e3596759 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 3 Sep 2024 10:55:44 -0700 Subject: [PATCH 020/115] Remove from branch and just use current branch --- .github/workflows/sync-main-to-jdk-branch.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/sync-main-to-jdk-branch.yml b/.github/workflows/sync-main-to-jdk-branch.yml index ff87d5cd71178..1ae48c6eaa116 100644 --- a/.github/workflows/sync-main-to-jdk-branch.yml +++ b/.github/workflows/sync-main-to-jdk-branch.yml @@ -16,6 +16,5 @@ jobs: uses: devmasx/merge-branch@1.4.0 with: type: 'now' - from_branch: main target_branch: openjdk23-bundle 
github_token: ${{ secrets.ELASTICSEARCHMACHINE_TOKEN }} From e6525deefac463e551846b99be39407e5e0f0f44 Mon Sep 17 00:00:00 2001 From: Marc Lopez Rubio Date: Tue, 3 Sep 2024 11:38:13 -0700 Subject: [PATCH 021/115] logs-apm.error-*: define log.level field as keyword (#112440) Defines `log.level` as a `keyword` for all apm error logs. --------- Signed-off-by: Marc Lopez Rubio --- docs/changelog/112440.yaml | 5 ++++ .../logs-apm.error@mappings.yaml | 3 ++ .../src/main/resources/resources.yaml | 2 +- .../rest-api-spec/test/20_error_logs.yml | 28 +++++++++++++++++++ 4 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112440.yaml create mode 100644 x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_logs.yml diff --git a/docs/changelog/112440.yaml b/docs/changelog/112440.yaml new file mode 100644 index 0000000000000..f208474fa2686 --- /dev/null +++ b/docs/changelog/112440.yaml @@ -0,0 +1,5 @@ +pr: 112440 +summary: "logs-apm.error-*: define log.level field as keyword" +area: Data streams +type: bug +issues: [] diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml index c1d004b4e7bf4..6c83f40252354 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml @@ -6,6 +6,9 @@ _meta: template: mappings: properties: + # log.* + log.level: + type: keyword # error.* error.custom: type: object diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index 3e66769d939ad..0502a8c559ff6 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates 
and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 8 +version: 9 component-templates: # Data lifecycle. diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_logs.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_logs.yml new file mode 100644 index 0000000000000..5d2a6ec29ff4c --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_logs.yml @@ -0,0 +1,28 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid +--- +"Test logs-apm.error-* error log fields": + - do: + bulk: + index: logs-apm.error-log-level-testing + refresh: true + body: + - create: {} + - '{"@timestamp": "2017-06-22", "log": {"level": "error"}, "error": {"log": {"message": "loglevel"}, "exception": [{"message": "exception_used"}]}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "log": {"level": "warn"}, "error": {"log": {"message": "loglevel"}, "exception": [{"message": "exception_used"}]}}' + + - is_false: errors + + - do: + search: + index: logs-apm.error-log-level-testing + body: + fields: ["log.level"] + - length: { hits.hits: 2 } + - match: { hits.hits.0.fields: { "log.level": ["error"] } } + - match: { hits.hits.1.fields: { "log.level": ["warn"] } } From d59ca365cf27d8b26713eaa7269701360538c724 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 3 Sep 2024 11:45:18 -0700 Subject: [PATCH 022/115] Update cron schedule --- .github/workflows/sync-main-to-jdk-branch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-main-to-jdk-branch.yml b/.github/workflows/sync-main-to-jdk-branch.yml index 1ae48c6eaa116..eea3348529284 100644 --- a/.github/workflows/sync-main-to-jdk-branch.yml +++ b/.github/workflows/sync-main-to-jdk-branch.yml @@ -2,7 +2,7 @@ name: "Merge main to 
openjdk23-bundle branch" on: schedule: - - cron: '0 5 * * *' + - cron: '30 17 * * *' workflow_dispatch: {} jobs: From d946d0cd98cbc2a77d1c9294e59efe40bf5da04b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 4 Sep 2024 14:29:25 +1000 Subject: [PATCH 023/115] Mute org.elasticsearch.xpack.spatial.index.query.LegacyGeoShapeWithDocValuesQueryTests testIndexPointsFromPolygon #112464 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 45c286375a1ed..1a8b0e8526c89 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -188,6 +188,9 @@ tests: - class: org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests method: testLoopOneAtATime issue: https://github.com/elastic/elasticsearch/issues/112471 +- class: org.elasticsearch.xpack.spatial.index.query.LegacyGeoShapeWithDocValuesQueryTests + method: testIndexPointsFromPolygon + issue: https://github.com/elastic/elasticsearch/issues/112464 # Examples: # From 0aff606b357dd492735ec9bbbced6e309de54387 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 4 Sep 2024 09:47:54 +0300 Subject: [PATCH 024/115] Avoid passing in the cluster state parameter to methods related to index creation (#112466) Avoids the ClusterState type as a method parameter in favor of the more specific Metadata, RoutingTable, and ClusterBlocks ones. 
--- .../indices/rollover/LazyRolloverAction.java | 3 +- .../rollover/MetadataRolloverService.java | 10 +-- .../rollover/TransportRolloverAction.java | 3 +- .../metadata/MetadataCreateIndexService.java | 89 +++++++++++++------ .../metadata/MetadataIndexStateService.java | 2 +- .../MetadataUpdateSettingsService.java | 7 +- .../ShardsCapacityHealthIndicatorService.java | 36 ++++++-- .../indices/ShardLimitValidator.java | 84 +++++++++-------- .../snapshots/RestoreService.java | 17 +++- .../MetadataCreateIndexServiceTests.java | 76 ++++++++-------- ...dsCapacityHealthIndicatorServiceTests.java | 18 ++-- .../indices/ShardLimitValidatorTests.java | 25 ++++-- .../downsample/TransportDownsampleAction.java | 2 +- 13 files changed, 235 insertions(+), 137 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index ef72fdd93caeb..65b768a1c629f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; @@ -134,7 +135,7 @@ protected void masterOperation( ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); - MetadataRolloverService.validateIndexName(clusterState, trialRolloverIndexName); + 
MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, clusterState.metadata(), clusterState.routingTable()); assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 9d34b9ab5f126..b8d975f82980d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -179,10 +179,6 @@ public RolloverResult rolloverClusterState( }; } - public static void validateIndexName(ClusterState state, String index) { - MetadataCreateIndexService.validateIndexName(index, state); - } - /** * Returns the names that rollover would use, but does not perform the actual rollover */ @@ -252,7 +248,8 @@ private RolloverResult rolloverAlias( final Boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.exists(createIndexRequest.settings()) ? 
IndexMetadata.INDEX_HIDDEN_SETTING.get(createIndexRequest.settings()) : null; - MetadataCreateIndexService.validateIndexName(rolloverIndexName, currentState); // fails if the index already exists + MetadataCreateIndexService.validateIndexName(rolloverIndexName, metadata, currentState.routingTable()); // fails if the index + // already exists checkNoDuplicatedAliasInIndexTemplate(metadata, rolloverIndexName, aliasName, isHidden); if (onlyValidate) { return new RolloverResult(rolloverIndexName, sourceIndexName, currentState); @@ -328,7 +325,8 @@ private RolloverResult rolloverDataStream( final Tuple nextIndexAndGeneration = dataStream.nextWriteIndexAndGeneration(metadata, dataStreamIndices); final String newWriteIndexName = nextIndexAndGeneration.v1(); final long newGeneration = nextIndexAndGeneration.v2(); - MetadataCreateIndexService.validateIndexName(newWriteIndexName, currentState); // fails if the index already exists + MetadataCreateIndexService.validateIndexName(newWriteIndexName, metadata, currentState.routingTable()); // fails if the index + // already exists if (onlyValidate) { return new RolloverResult(newWriteIndexName, isLazyCreation ? 
NON_EXISTENT_SOURCE : originalWriteIndex.getName(), currentState); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 9df3be1994fdf..c997795bb3b89 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; @@ -179,7 +180,7 @@ protected void masterOperation( ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); - MetadataRolloverService.validateIndexName(clusterState, trialRolloverIndexName); + MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, metadata, clusterState.routingTable()); boolean isDataStream = metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()); if (rolloverRequest.isLazy()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 07dcb7baf0777..02b7312b4a99d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -167,7 +167,7 @@ 
public MetadataCreateIndexService( /** * Validate the name for an index against some static rules and a cluster state. */ - public static void validateIndexName(String index, ClusterState state) { + public static void validateIndexName(String index, Metadata metadata, RoutingTable routingTable) { validateIndexOrAliasName(index, InvalidIndexNameException::new); if (index.toLowerCase(Locale.ROOT).equals(index) == false) { throw new InvalidIndexNameException(index, "must be lowercase"); @@ -175,13 +175,13 @@ public static void validateIndexName(String index, ClusterState state) { // NOTE: dot-prefixed index names are validated after template application, not here - if (state.routingTable().hasIndex(index)) { - throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); + if (routingTable.hasIndex(index)) { + throw new ResourceAlreadyExistsException(routingTable.index(index).getIndex()); } - if (state.metadata().hasIndex(index)) { - throw new ResourceAlreadyExistsException(state.metadata().index(index).getIndex()); + if (metadata.hasIndex(index)) { + throw new ResourceAlreadyExistsException(metadata.index(index).getIndex()); } - if (state.metadata().hasAlias(index)) { + if (metadata.hasAlias(index)) { throw new InvalidIndexNameException(index, "already exists as alias"); } } @@ -344,7 +344,7 @@ public ClusterState applyCreateIndexRequest( normalizeRequestSetting(request); logger.trace("executing IndexCreationTask for [{}] against cluster state version [{}]", request, currentState.version()); - validate(request, currentState); + validate(request, currentState.metadata(), currentState.routingTable()); final Index recoverFromIndex = request.recoverFrom(); final IndexMetadata sourceMetadata = recoverFromIndex == null ? 
null : currentState.metadata().getIndexSafe(recoverFromIndex); @@ -1069,7 +1069,9 @@ static Settings aggregateIndexSettings( if (sourceMetadata != null) { assert request.resizeType() != null; prepareResizeIndexSettings( - currentState, + currentState.metadata(), + currentState.blocks(), + currentState.routingTable(), indexSettingsBuilder, request.recoverFrom(), request.index(), @@ -1084,7 +1086,7 @@ static Settings aggregateIndexSettings( * We can not validate settings until we have applied templates, otherwise we do not know the actual settings * that will be used to create this index. */ - shardLimitValidator.validateShardLimit(indexSettings, currentState); + shardLimitValidator.validateShardLimit(indexSettings, currentState.nodes(), currentState.metadata()); validateSoftDeleteSettings(indexSettings); validateTranslogRetentionSettings(indexSettings); validateStoreTypeSetting(indexSettings); @@ -1363,8 +1365,8 @@ private static void validateActiveShardCount(ActiveShardCount waitForActiveShard } } - private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { - validateIndexName(request.index(), state); + private void validate(CreateIndexClusterStateUpdateRequest request, Metadata metadata, RoutingTable routingTable) { + validateIndexName(request.index(), metadata, routingTable); validateIndexSettings(request.index(), request.settings(), forbidPrivateIndexSettings); } @@ -1428,8 +1430,15 @@ private static List validateIndexCustomPath(Settings settings, @Nullable * * @return the list of nodes at least one instance of the source index shards are allocated */ - static List validateShrinkIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { - IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); + static List validateShrinkIndex( + Metadata metadata, + ClusterBlocks clusterBlocks, + RoutingTable routingTable, + String sourceIndex, + String 
targetIndexName, + Settings targetIndexSettings + ) { + IndexMetadata sourceMetadata = validateResize(metadata, clusterBlocks, sourceIndex, targetIndexName, targetIndexSettings); if (sourceMetadata.isSearchableSnapshot()) { throw new IllegalArgumentException("can't shrink searchable snapshot index [" + sourceIndex + ']'); } @@ -1441,7 +1450,7 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, } // now check that index is all on one node - final IndexRoutingTable table = state.routingTable().index(sourceIndex); + final IndexRoutingTable table = routingTable.index(sourceIndex); Map nodesToNumRouting = new HashMap<>(); int numShards = sourceMetadata.getNumberOfShards(); for (ShardRouting routing : table.shardsWithState(ShardRoutingState.STARTED)) { @@ -1461,16 +1470,28 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, return nodesToAllocateOn; } - static void validateSplitIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { - IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); + static void validateSplitIndex( + Metadata metadata, + ClusterBlocks clusterBlocks, + String sourceIndex, + String targetIndexName, + Settings targetIndexSettings + ) { + IndexMetadata sourceMetadata = validateResize(metadata, clusterBlocks, sourceIndex, targetIndexName, targetIndexSettings); if (sourceMetadata.isSearchableSnapshot()) { throw new IllegalArgumentException("can't split searchable snapshot index [" + sourceIndex + ']'); } IndexMetadata.selectSplitShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } - static void validateCloneIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { - IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); + static void validateCloneIndex( + Metadata metadata, + ClusterBlocks 
clusterBlocks, + String sourceIndex, + String targetIndexName, + Settings targetIndexSettings + ) { + IndexMetadata sourceMetadata = validateResize(metadata, clusterBlocks, sourceIndex, targetIndexName, targetIndexSettings); if (sourceMetadata.isSearchableSnapshot()) { for (Setting nonCloneableSetting : Arrays.asList(INDEX_STORE_TYPE_SETTING, INDEX_RECOVERY_TYPE_SETTING)) { if (nonCloneableSetting.exists(targetIndexSettings) == false) { @@ -1487,16 +1508,22 @@ static void validateCloneIndex(ClusterState state, String sourceIndex, String ta IndexMetadata.selectCloneShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } - static IndexMetadata validateResize(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { - if (state.metadata().hasIndex(targetIndexName)) { - throw new ResourceAlreadyExistsException(state.metadata().index(targetIndexName).getIndex()); + static IndexMetadata validateResize( + Metadata metadata, + ClusterBlocks clusterBlocks, + String sourceIndex, + String targetIndexName, + Settings targetIndexSettings + ) { + if (metadata.hasIndex(targetIndexName)) { + throw new ResourceAlreadyExistsException(metadata.index(targetIndexName).getIndex()); } - final IndexMetadata sourceMetadata = state.metadata().index(sourceIndex); + final IndexMetadata sourceMetadata = metadata.index(sourceIndex); if (sourceMetadata == null) { throw new IndexNotFoundException(sourceIndex); } - IndexAbstraction source = state.metadata().getIndicesLookup().get(sourceIndex); + IndexAbstraction source = metadata.getIndicesLookup().get(sourceIndex); assert source != null; if (source.getParentDataStream() != null && source.getParentDataStream().getWriteIndex().equals(sourceMetadata.getIndex())) { throw new IllegalArgumentException( @@ -1509,7 +1536,7 @@ static IndexMetadata validateResize(ClusterState state, String sourceIndex, Stri ); } // ensure index is read-only - if 
(state.blocks().indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) { + if (clusterBlocks.indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) { throw new IllegalStateException("index " + sourceIndex + " must be read-only to resize index. use \"index.blocks.write=true\""); } @@ -1522,7 +1549,9 @@ static IndexMetadata validateResize(ClusterState state, String sourceIndex, Stri } static void prepareResizeIndexSettings( - final ClusterState currentState, + final Metadata metadata, + final ClusterBlocks clusterBlocks, + final RoutingTable routingTable, final Settings.Builder indexSettingsBuilder, final Index resizeSourceIndex, final String resizeIntoName, @@ -1530,20 +1559,22 @@ static void prepareResizeIndexSettings( final boolean copySettings, final IndexScopedSettings indexScopedSettings ) { - final IndexMetadata sourceMetadata = currentState.metadata().index(resizeSourceIndex.getName()); + final IndexMetadata sourceMetadata = metadata.index(resizeSourceIndex.getName()); if (type == ResizeType.SHRINK) { final List nodesToAllocateOn = validateShrinkIndex( - currentState, + metadata, + clusterBlocks, + routingTable, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build() ); indexSettingsBuilder.put(INDEX_SHRINK_INITIAL_RECOVERY_KEY, Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())); } else if (type == ResizeType.SPLIT) { - validateSplitIndex(currentState, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build()); + validateSplitIndex(metadata, clusterBlocks, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build()); indexSettingsBuilder.putNull(INDEX_SHRINK_INITIAL_RECOVERY_KEY); } else if (type == ResizeType.CLONE) { - validateCloneIndex(currentState, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build()); + validateCloneIndex(metadata, clusterBlocks, resizeSourceIndex.getName(), resizeIntoName, indexSettingsBuilder.build()); 
indexSettingsBuilder.putNull(INDEX_SHRINK_INITIAL_RECOVERY_KEY); } else { throw new IllegalStateException("unknown resize type is " + type); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index be12198cbaaaa..272c107883043 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -1100,7 +1100,7 @@ private ClusterState openIndices(final Index[] indices, final ClusterState curre } } - shardLimitValidator.validateShardLimit(currentState, indices); + shardLimitValidator.validateShardLimit(currentState.nodes(), currentState.metadata(), indices); if (indicesToOpen.isEmpty()) { return currentState; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 5891b953acfca..3272462dd3725 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -255,7 +255,12 @@ ClusterState execute(ClusterState currentState) { final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings); if (preserveExisting == false) { // Verify that this won't take us over the cluster shard limit. 
- shardLimitValidator.validateShardLimitOnReplicaUpdate(currentState, request.indices(), updatedNumberOfReplicas); + shardLimitValidator.validateShardLimitOnReplicaUpdate( + currentState.nodes(), + currentState.metadata(), + request.indices(), + updatedNumberOfReplicas + ); /* * We do not update the in-sync allocation IDs as they will be removed upon the first index operation diff --git a/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java index e5ced00905744..e591e8a681764 100644 --- a/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java @@ -8,7 +8,8 @@ package org.elasticsearch.health.node; -import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Setting; @@ -124,8 +125,18 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); return mergeIndicators( verbose, - calculateFrom(shardLimitsMetadata.maxShardsPerNode(), state, ShardLimitValidator::checkShardLimitForNormalNodes), - calculateFrom(shardLimitsMetadata.maxShardsPerNodeFrozen(), state, ShardLimitValidator::checkShardLimitForFrozenNodes) + calculateFrom( + shardLimitsMetadata.maxShardsPerNode(), + state.nodes(), + state.metadata(), + ShardLimitValidator::checkShardLimitForNormalNodes + ), + calculateFrom( + shardLimitsMetadata.maxShardsPerNodeFrozen(), + state.nodes(), + state.metadata(), + ShardLimitValidator::checkShardLimitForFrozenNodes + ) ); } @@ -173,13 +184,18 @@ private HealthIndicatorResult 
mergeIndicators(boolean verbose, StatusResult data ); } - static StatusResult calculateFrom(int maxShardsPerNodeSetting, ClusterState state, ShardsCapacityChecker checker) { - var result = checker.check(maxShardsPerNodeSetting, 5, 1, state); + static StatusResult calculateFrom( + int maxShardsPerNodeSetting, + DiscoveryNodes discoveryNodes, + Metadata metadata, + ShardsCapacityChecker checker + ) { + var result = checker.check(maxShardsPerNodeSetting, 5, 1, discoveryNodes, metadata); if (result.canAddShards() == false) { return new StatusResult(HealthStatus.RED, result); } - result = checker.check(maxShardsPerNodeSetting, 10, 1, state); + result = checker.check(maxShardsPerNodeSetting, 10, 1, discoveryNodes, metadata); if (result.canAddShards() == false) { return new StatusResult(HealthStatus.YELLOW, result); } @@ -225,6 +241,12 @@ record StatusResult(HealthStatus status, ShardLimitValidator.Result result) {} @FunctionalInterface interface ShardsCapacityChecker { - ShardLimitValidator.Result check(int maxConfiguredShardsPerNode, int numberOfNewShards, int replicas, ClusterState state); + ShardLimitValidator.Result check( + int maxConfiguredShardsPerNode, + int numberOfNewShards, + int replicas, + DiscoveryNodes discoveryNodes, + Metadata metadata + ); } } diff --git a/server/src/main/java/org/elasticsearch/indices/ShardLimitValidator.java b/server/src/main/java/org/elasticsearch/indices/ShardLimitValidator.java index f58ee757cc511..b8841adb4d885 100644 --- a/server/src/main/java/org/elasticsearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/elasticsearch/indices/ShardLimitValidator.java @@ -8,10 +8,11 @@ package org.elasticsearch.indices; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import 
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.ValidationException; @@ -95,16 +96,22 @@ public int getShardLimitPerNode() { * Checks whether an index can be created without going over the cluster shard limit. * * @param settings the settings of the index to be created - * @param state the current cluster state + * @param discoveryNodes the nodes in the cluster + * @param metadata the cluster state metadata * @throws ValidationException if creating this index would put the cluster over the cluster shard limit */ - public void validateShardLimit(final Settings settings, final ClusterState state) { + public void validateShardLimit(final Settings settings, final DiscoveryNodes discoveryNodes, final Metadata metadata) { final int numberOfShards = INDEX_NUMBER_OF_SHARDS_SETTING.get(settings); final int numberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings); final int shardsToCreate = numberOfShards * (1 + numberOfReplicas); final boolean frozen = FROZEN_GROUP.equals(INDEX_SETTING_SHARD_LIMIT_GROUP.get(settings)); - final var result = checkShardLimitOnBothGroups(frozen == false ? shardsToCreate : 0, frozen ? shardsToCreate : 0, state); + final var result = checkShardLimitOnBothGroups( + frozen == false ? shardsToCreate : 0, + frozen ? shardsToCreate : 0, + discoveryNodes, + metadata + ); if (result.canAddShards == false) { final ValidationException e = new ValidationException(); e.addValidationError(errorMessageFrom(result)); @@ -116,15 +123,16 @@ public void validateShardLimit(final Settings settings, final ClusterState state * Validates whether a list of indices can be opened without going over the cluster shard limit. Only counts indices which are * currently closed and will be opened, ignores indices which are already open. * - * @param currentState The current cluster state. 
- * @param indicesToOpen The indices which are to be opened. + * @param discoveryNodes The nodes in the cluster + * @param metadata The cluster state metadata + * @param indicesToOpen The indices which are to be opened. * @throws ValidationException If this operation would take the cluster over the limit and enforcement is enabled. */ - public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) { + public void validateShardLimit(DiscoveryNodes discoveryNodes, Metadata metadata, Index[] indicesToOpen) { int frozen = 0; int normal = 0; for (Index index : indicesToOpen) { - IndexMetadata imd = currentState.metadata().index(index); + IndexMetadata imd = metadata.index(index); if (imd.getState().equals(IndexMetadata.State.CLOSE)) { int totalNewShards = imd.getNumberOfShards() * (1 + imd.getNumberOfReplicas()); if (FROZEN_GROUP.equals(INDEX_SETTING_SHARD_LIMIT_GROUP.get(imd.getSettings()))) { @@ -135,7 +143,7 @@ public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) } } - var result = checkShardLimitOnBothGroups(normal, frozen, currentState); + var result = checkShardLimitOnBothGroups(normal, frozen, discoveryNodes, metadata); if (result.canAddShards == false) { ValidationException ex = new ValidationException(); ex.addValidationError(errorMessageFrom(result)); @@ -143,12 +151,12 @@ public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) } } - public void validateShardLimitOnReplicaUpdate(ClusterState currentState, Index[] indices, int replicas) { + public void validateShardLimitOnReplicaUpdate(DiscoveryNodes discoveryNodes, Metadata metadata, Index[] indices, int replicas) { int frozen = 0; int normal = 0; for (Index index : indices) { - IndexMetadata imd = currentState.metadata().index(index); - int totalNewShards = getTotalNewShards(index, currentState, replicas); + IndexMetadata imd = metadata.index(index); + int totalNewShards = getTotalNewShards(index, metadata, replicas); if 
(FROZEN_GROUP.equals(INDEX_SETTING_SHARD_LIMIT_GROUP.get(imd.getSettings()))) { frozen += totalNewShards; } else { @@ -156,7 +164,7 @@ public void validateShardLimitOnReplicaUpdate(ClusterState currentState, Index[] } } - var result = checkShardLimitOnBothGroups(normal, frozen, currentState); + var result = checkShardLimitOnBothGroups(normal, frozen, discoveryNodes, metadata); if (result.canAddShards == false) { ValidationException ex = new ValidationException(); ex.addValidationError(errorMessageFrom(result)); @@ -164,8 +172,8 @@ public void validateShardLimitOnReplicaUpdate(ClusterState currentState, Index[] } } - private static int getTotalNewShards(Index index, ClusterState currentState, int updatedNumberOfReplicas) { - IndexMetadata indexMetadata = currentState.metadata().index(index); + private static int getTotalNewShards(Index index, Metadata metadata, int updatedNumberOfReplicas) { + IndexMetadata indexMetadata = metadata.index(index); int shardsInIndex = indexMetadata.getNumberOfShards(); int oldNumberOfReplicas = indexMetadata.getNumberOfReplicas(); int replicaIncrease = updatedNumberOfReplicas - oldNumberOfReplicas; @@ -181,21 +189,22 @@ private static int getTotalNewShards(Index index, ClusterState currentState, int * * @param newShards The number of normal shards to be added by this operation * @param newFrozenShards The number of frozen shards to be added by this operation - * @param state The current cluster state + * @param discoveryNodes The nodes in the cluster + * @param metadata The cluster state metadata */ - private Result checkShardLimitOnBothGroups(int newShards, int newFrozenShards, ClusterState state) { + private Result checkShardLimitOnBothGroups(int newShards, int newFrozenShards, DiscoveryNodes discoveryNodes, Metadata metadata) { // we verify the two limits independently. This also means that if they have mixed frozen and other data-roles nodes, such a mixed // node can have both 1000 normal and 3000 frozen shards. 
This is the trade-off to keep the simplicity of the counts. We advocate // against such mixed nodes for production use anyway. - int frozenNodeCount = nodeCount(state, ShardLimitValidator::hasFrozen); - int normalNodeCount = nodeCount(state, ShardLimitValidator::hasNonFrozen); + int frozenNodeCount = nodeCount(discoveryNodes, ShardLimitValidator::hasFrozen); + int normalNodeCount = nodeCount(discoveryNodes, ShardLimitValidator::hasNonFrozen); - var result = checkShardLimit(newShards, state, getShardLimitPerNode(), normalNodeCount, NORMAL_GROUP); + var result = checkShardLimit(newShards, metadata, getShardLimitPerNode(), normalNodeCount, NORMAL_GROUP); // fail-fast: in case there's no room on the `normal` nodes, just return the result of that check. if (result.canAddShards() == false) { return result; } - return checkShardLimit(newFrozenShards, state, shardLimitPerNodeFrozen.get(), frozenNodeCount, FROZEN_GROUP); + return checkShardLimit(newFrozenShards, metadata, shardLimitPerNodeFrozen.get(), frozenNodeCount, FROZEN_GROUP); } /** @@ -205,20 +214,21 @@ private Result checkShardLimitOnBothGroups(int newShards, int newFrozenShards, C * @param maxConfiguredShardsPerNode The maximum available number of shards to be allocated within a node * @param numberOfNewShards The number of primary shards that we want to be able to add to the cluster * @param replicas The number of replicas of the primary shards that we want to be able to add to the cluster - * @param state The cluster state, used to get cluster settings and to get the number of open shards already in - * the cluster + * @param discoveryNodes The nodes in the cluster, used to get the number of open shard already in the cluster + * @param metadata The cluster state metadata, used to get the cluster settings */ public static Result checkShardLimitForNormalNodes( int maxConfiguredShardsPerNode, int numberOfNewShards, int replicas, - ClusterState state + DiscoveryNodes discoveryNodes, + Metadata metadata ) { return 
checkShardLimit( numberOfNewShards * (1 + replicas), - state, + metadata, maxConfiguredShardsPerNode, - nodeCount(state, ShardLimitValidator::hasNonFrozen), + nodeCount(discoveryNodes, ShardLimitValidator::hasNonFrozen), NORMAL_GROUP ); } @@ -230,27 +240,28 @@ public static Result checkShardLimitForNormalNodes( * @param maxConfiguredShardsPerNode The maximum available number of shards to be allocated within a node * @param numberOfNewShards The number of primary shards that we want to be able to add to the cluster * @param replicas The number of replicas of the primary shards that we want to be able to add to the cluster - * @param state The cluster state, used to get cluster settings and to get the number of open shards already in - * the cluster + * @param discoveryNodes The nodes in the cluster, used to get the number of open shard already in the cluster + * @param metadata The cluster state metadata, used to get the cluster settings */ public static Result checkShardLimitForFrozenNodes( int maxConfiguredShardsPerNode, int numberOfNewShards, int replicas, - ClusterState state + DiscoveryNodes discoveryNodes, + Metadata metadata ) { return checkShardLimit( numberOfNewShards * (1 + replicas), - state, + metadata, maxConfiguredShardsPerNode, - nodeCount(state, ShardLimitValidator::hasFrozen), + nodeCount(discoveryNodes, ShardLimitValidator::hasFrozen), FROZEN_GROUP ); } - private static Result checkShardLimit(int newShards, ClusterState state, int maxConfiguredShardsPerNode, int nodeCount, String group) { + private static Result checkShardLimit(int newShards, Metadata metadata, int maxConfiguredShardsPerNode, int nodeCount, String group) { int maxShardsInCluster = maxConfiguredShardsPerNode * nodeCount; - int currentOpenShards = state.getMetadata().getTotalOpenIndexShards(); + int currentOpenShards = metadata.getTotalOpenIndexShards(); // Only enforce the shard limit if we have at least one data node, so that we don't block // index creation during cluster setup @@ 
-261,8 +272,7 @@ private static Result checkShardLimit(int newShards, ClusterState state, int max if ((currentOpenShards + newShards) > maxShardsInCluster) { Predicate indexMetadataPredicate = imd -> imd.getState().equals(IndexMetadata.State.OPEN) && group.equals(INDEX_SETTING_SHARD_LIMIT_GROUP.get(imd.getSettings())); - long currentFilteredShards = state.metadata() - .indices() + long currentFilteredShards = metadata.indices() .values() .stream() .filter(indexMetadataPredicate) @@ -276,8 +286,8 @@ private static Result checkShardLimit(int newShards, ClusterState state, int max return new Result(true, Optional.empty(), newShards, maxShardsInCluster, group); } - private static int nodeCount(ClusterState state, Predicate nodePredicate) { - return (int) state.getNodes().getDataNodes().values().stream().filter(nodePredicate).count(); + private static int nodeCount(DiscoveryNodes discoveryNodes, Predicate nodePredicate) { + return (int) discoveryNodes.getDataNodes().values().stream().filter(nodePredicate).count(); } private static boolean hasFrozen(DiscoveryNode node) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index d8987495f9035..a2d8d6374a457 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -1343,8 +1343,12 @@ public ClusterState execute(ClusterState currentState) { if (currentIndexMetadata == null) { // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a valid name - ensureValidIndexName(currentState, snapshotIndexMetadata, renamedIndexName); - shardLimitValidator.validateShardLimit(snapshotIndexMetadata.getSettings(), currentState); + ensureValidIndexName(currentState.metadata(), currentState.routingTable(), snapshotIndexMetadata, renamedIndexName); + shardLimitValidator.validateShardLimit( + 
snapshotIndexMetadata.getSettings(), + currentState.nodes(), + currentState.metadata() + ); final IndexMetadata.Builder indexMdBuilder = restoreToCreateNewIndex( snapshotIndexMetadata, @@ -1789,9 +1793,14 @@ private static IndexMetadata.Builder restoreOverClosedIndex( return indexMdBuilder; } - private void ensureValidIndexName(ClusterState currentState, IndexMetadata snapshotIndexMetadata, String renamedIndexName) { + private void ensureValidIndexName( + Metadata metadata, + RoutingTable routingTable, + IndexMetadata snapshotIndexMetadata, + String renamedIndexName + ) { final boolean isHidden = snapshotIndexMetadata.isHidden(); - MetadataCreateIndexService.validateIndexName(renamedIndexName, currentState); + MetadataCreateIndexService.validateIndexName(renamedIndexName, metadata, routingTable); createIndexService.validateDotIndex(renamedIndexName, isHidden); createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetadata.getSettings(), false); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index f7d343b43b29c..01394a7abbcd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -170,18 +170,14 @@ public void testValidateShrinkIndex() { assertEquals( "index [source] already exists", - expectThrows( - ResourceAlreadyExistsException.class, - () -> MetadataCreateIndexService.validateShrinkIndex(state, "target", "source", Settings.EMPTY) - ).getMessage() + expectThrows(ResourceAlreadyExistsException.class, () -> validateShrinkIndex(state, "target", "source", Settings.EMPTY)) + .getMessage() ); assertEquals( "no such index [no_such_index]", - expectThrows( - IndexNotFoundException.class, - () -> MetadataCreateIndexService.validateShrinkIndex(state, 
"no_such_index", "target", Settings.EMPTY) - ).getMessage() + expectThrows(IndexNotFoundException.class, () -> validateShrinkIndex(state, "no_such_index", "target", Settings.EMPTY)) + .getMessage() ); Settings targetSettings = Settings.builder().put("index.number_of_shards", 1).build(); @@ -189,7 +185,7 @@ public void testValidateShrinkIndex() { "can't shrink an index with only one shard", expectThrows( IllegalArgumentException.class, - () -> MetadataCreateIndexService.validateShrinkIndex( + () -> validateShrinkIndex( createClusterState("source", 1, 0, Settings.builder().put("index.blocks.write", true).build()), "source", "target", @@ -202,7 +198,7 @@ public void testValidateShrinkIndex() { "the number of target shards [10] must be less that the number of source shards [5]", expectThrows( IllegalArgumentException.class, - () -> MetadataCreateIndexService.validateShrinkIndex( + () -> validateShrinkIndex( createClusterState("source", 5, 0, Settings.builder().put("index.blocks.write", true).build()), "source", "target", @@ -215,7 +211,7 @@ public void testValidateShrinkIndex() { "index source must be read-only to resize index. 
use \"index.blocks.write=true\"", expectThrows( IllegalStateException.class, - () -> MetadataCreateIndexService.validateShrinkIndex( + () -> validateShrinkIndex( createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY), "source", "target", @@ -228,7 +224,7 @@ public void testValidateShrinkIndex() { "index source must have all shards allocated on the same node to shrink index", expectThrows( IllegalStateException.class, - () -> MetadataCreateIndexService.validateShrinkIndex(state, "source", "target", targetSettings) + () -> validateShrinkIndex(state, "source", "target", targetSettings) ).getMessage() ); @@ -236,7 +232,7 @@ public void testValidateShrinkIndex() { "the number of source shards [8] must be a multiple of [3]", expectThrows( IllegalArgumentException.class, - () -> MetadataCreateIndexService.validateShrinkIndex( + () -> validateShrinkIndex( createClusterState("source", 8, randomIntBetween(0, 10), Settings.builder().put("index.blocks.write", true).build()), "source", "target", @@ -267,12 +263,7 @@ public void testValidateShrinkIndex() { do { targetShards = randomIntBetween(1, numShards / 2); } while (isShrinkable(numShards, targetShards) == false); - MetadataCreateIndexService.validateShrinkIndex( - clusterState, - "source", - "target", - Settings.builder().put("index.number_of_shards", targetShards).build() - ); + validateShrinkIndex(clusterState, "source", "target", Settings.builder().put("index.number_of_shards", targetShards).build()); } public void testValidateSplitIndex() { @@ -287,25 +278,21 @@ public void testValidateSplitIndex() { assertEquals( "index [source] already exists", - expectThrows( - ResourceAlreadyExistsException.class, - () -> MetadataCreateIndexService.validateSplitIndex(state, "target", "source", targetSettings) - ).getMessage() + expectThrows(ResourceAlreadyExistsException.class, () -> validateSplitIndex(state, "target", "source", targetSettings)) + .getMessage() ); assertEquals( "no such index 
[no_such_index]", - expectThrows( - IndexNotFoundException.class, - () -> MetadataCreateIndexService.validateSplitIndex(state, "no_such_index", "target", targetSettings) - ).getMessage() + expectThrows(IndexNotFoundException.class, () -> validateSplitIndex(state, "no_such_index", "target", targetSettings)) + .getMessage() ); assertEquals( "the number of source shards [10] must be less that the number of target shards [5]", expectThrows( IllegalArgumentException.class, - () -> MetadataCreateIndexService.validateSplitIndex( + () -> validateSplitIndex( createClusterState("source", 10, 0, Settings.builder().put("index.blocks.write", true).build()), "source", "target", @@ -318,7 +305,7 @@ public void testValidateSplitIndex() { "index source must be read-only to resize index. use \"index.blocks.write=true\"", expectThrows( IllegalStateException.class, - () -> MetadataCreateIndexService.validateSplitIndex( + () -> validateSplitIndex( createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY), "source", "target", @@ -331,7 +318,7 @@ public void testValidateSplitIndex() { "the number of source shards [3] must be a factor of [4]", expectThrows( IllegalArgumentException.class, - () -> MetadataCreateIndexService.validateSplitIndex( + () -> validateSplitIndex( createClusterState("source", 3, randomIntBetween(0, 10), Settings.builder().put("index.blocks.write", true).build()), "source", "target", @@ -367,12 +354,7 @@ public void testValidateSplitIndex() { routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - MetadataCreateIndexService.validateSplitIndex( - clusterState, - "source", - "target", - Settings.builder().put("index.number_of_shards", targetShards).build() - ); + validateSplitIndex(clusterState, "source", "target", Settings.builder().put("index.number_of_shards", 
targetShards).build()); } public void testPrepareResizeIndexSettings() { @@ -528,7 +510,9 @@ private void runPrepareResizeIndexSettingsTest( additionalIndexScopedSettings.stream() ).collect(Collectors.toSet()); MetadataCreateIndexService.prepareResizeIndexSettings( - clusterState, + clusterState.metadata(), + clusterState.blocks(), + clusterState.routingTable(), indexSettingsBuilder, clusterState.metadata().index(indexName).getIndex(), "target", @@ -576,9 +560,10 @@ public void testValidateIndexName() throws Exception { } private static void validateIndexName(MetadataCreateIndexService metadataCreateIndexService, String indexName, String errorMessage) { + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build(); InvalidIndexNameException e = expectThrows( InvalidIndexNameException.class, - () -> MetadataCreateIndexService.validateIndexName(indexName, ClusterState.builder(ClusterName.DEFAULT).build()) + () -> MetadataCreateIndexService.validateIndexName(indexName, state.metadata(), state.routingTable()) ); assertThat(e.getMessage(), endsWith(errorMessage)); } @@ -1423,4 +1408,19 @@ private void withTemporaryClusterService(BiConsumer threadPool.shutdown(); } } + + private List validateShrinkIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { + return MetadataCreateIndexService.validateShrinkIndex( + state.metadata(), + state.blocks(), + state.routingTable(), + sourceIndex, + targetIndexName, + targetIndexSettings + ); + } + + private void validateSplitIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { + MetadataCreateIndexService.validateSplitIndex(state.metadata(), state.blocks(), sourceIndex, targetIndexName, targetIndexSettings); + } } diff --git a/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java index 
1c3d0d486b282..a5a61eab4df9e 100644 --- a/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java @@ -341,8 +341,10 @@ public void testCalculateMethods() { maxConfiguredShardsPerNode, numberOfNewShards, replicas, - state) -> { - assertEquals(mockedState, state); + discoveryNodes, + metadata) -> { + assertEquals(mockedState.nodes(), discoveryNodes); + assertEquals(mockedState.metadata(), metadata); assertEquals(randomMaxShardsPerNodeSetting, maxConfiguredShardsPerNode); return new ShardLimitValidator.Result( numberOfNewShards != shardsToAdd && replicas == 1, @@ -353,13 +355,19 @@ public void testCalculateMethods() { ); }; - assertEquals(calculateFrom(randomMaxShardsPerNodeSetting, mockedState, checkerWrapper.apply(5)).status(), RED); - assertEquals(calculateFrom(randomMaxShardsPerNodeSetting, mockedState, checkerWrapper.apply(10)).status(), YELLOW); + assertEquals( + calculateFrom(randomMaxShardsPerNodeSetting, mockedState.nodes(), mockedState.metadata(), checkerWrapper.apply(5)).status(), + RED + ); + assertEquals( + calculateFrom(randomMaxShardsPerNodeSetting, mockedState.nodes(), mockedState.metadata(), checkerWrapper.apply(10)).status(), + YELLOW + ); // Let's cover the holes :) Stream.of(randomIntBetween(1, 4), randomIntBetween(6, 9), randomIntBetween(11, Integer.MAX_VALUE)) .map(checkerWrapper) - .map(checker -> calculateFrom(randomMaxShardsPerNodeSetting, mockedState, checker)) + .map(checker -> calculateFrom(randomMaxShardsPerNodeSetting, mockedState.nodes(), mockedState.metadata(), checker)) .map(ShardsCapacityHealthIndicatorService.StatusResult::status) .forEach(status -> assertEquals(status, GREEN)); } diff --git a/server/src/test/java/org/elasticsearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/elasticsearch/indices/ShardLimitValidatorTests.java index 0eea536ddbff1..36b0711ad6c13 100644 
--- a/server/src/test/java/org/elasticsearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/elasticsearch/indices/ShardLimitValidatorTests.java @@ -41,7 +41,13 @@ public class ShardLimitValidatorTests extends ESTestCase { @FunctionalInterface interface CheckShardLimitMethod { - ShardLimitValidator.Result call(int maxConfiguredShardsPerNode, int numberOfNewShards, int replicas, ClusterState state); + ShardLimitValidator.Result call( + int maxConfiguredShardsPerNode, + int numberOfNewShards, + int replicas, + DiscoveryNodes discoveryNodes, + Metadata metadata + ); } public void testOverShardLimit() { @@ -63,7 +69,8 @@ private void testOverShardLimit(CheckShardLimitMethod targetMethod, String group counts.getShardsPerNode(), counts.getFailingIndexShards(), counts.getFailingIndexReplicas(), - state + state.nodes(), + state.metadata() ); int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); @@ -113,7 +120,13 @@ private void testUnderShardLimit(CheckShardLimitMethod targetMethod, String grou int existingShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); int availableRoom = maxShardsInCluster - existingShards; int shardsToAdd = randomIntBetween(1, Math.max(availableRoom / (replicas + 1), 1)); - ShardLimitValidator.Result shardLimitsResult = targetMethod.call(counts.getShardsPerNode(), shardsToAdd, replicas, state); + ShardLimitValidator.Result shardLimitsResult = targetMethod.call( + counts.getShardsPerNode(), + shardsToAdd, + replicas, + state.nodes(), + state.metadata() + ); assertTrue(shardLimitsResult.canAddShards()); assertEquals(shardLimitsResult.maxShardsInCluster(), counts.getShardsPerNode() * nodesInCluster); assertEquals(shardLimitsResult.totalShardsToAdd(), shardsToAdd * (replicas + 1)); @@ -142,7 +155,7 @@ public void testValidateShardLimitOpenIndices() { ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), group); 
ValidationException exception = expectThrows( ValidationException.class, - () -> shardLimitValidator.validateShardLimit(state, indices) + () -> shardLimitValidator.validateShardLimit(state.nodes(), state.metadata(), indices) ); assertEquals( "Validation Failed: 1: this action would add [" @@ -168,11 +181,11 @@ public void testValidateShardLimitUpdateReplicas() { final Index[] indices = getIndices(state); final ShardLimitValidator shardLimitValidator = createTestShardLimitService(shardsPerNode, group); - shardLimitValidator.validateShardLimitOnReplicaUpdate(state, indices, nodesInCluster - 1); + shardLimitValidator.validateShardLimitOnReplicaUpdate(state.nodes(), state.metadata(), indices, nodesInCluster - 1); ValidationException exception = expectThrows( ValidationException.class, - () -> shardLimitValidator.validateShardLimitOnReplicaUpdate(state, indices, nodesInCluster) + () -> shardLimitValidator.validateShardLimitOnReplicaUpdate(state.nodes(), state.metadata(), indices, nodesInCluster) ); assertEquals( "Validation Failed: 1: this action would add [" diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index d8c9acff156ce..58a0370efb50e 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -267,7 +267,7 @@ protected void masterOperation( return; } try { - MetadataCreateIndexService.validateIndexName(downsampleIndexName, state); + MetadataCreateIndexService.validateIndexName(downsampleIndexName, state.metadata(), state.routingTable()); } catch (ResourceAlreadyExistsException e) { // ignore index already exists } From c21fc968d4180b025e2be2eb1bd7a4b503aeffbd Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 
2024 09:26:00 +0200 Subject: [PATCH 025/115] Remove unnecessary synchronization from InternalTestCluster (#112420) I think we can drop some of the synchronization here without making any other changes. This significantly reduces JIT CPU time in internal cluster tests, which by way of allowing more parallel runs of the tests speeds them up. --- .../test/InternalTestCluster.java | 48 ++++++++++++------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 332df7123fd1b..38b8dfecc0b5e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -656,12 +656,17 @@ private NodeAndClient getOrBuildRandomNode() { } private NodeAndClient getRandomNodeAndClient() { - return getRandomNodeAndClient(Predicates.always()); + var n = nodes; + ensureOpen(); + if (n.isEmpty()) { + return null; + } + return randomFrom(n.values()); } - private synchronized NodeAndClient getRandomNodeAndClient(Predicate predicate) { - ensureOpen(); + private NodeAndClient getRandomNodeAndClient(Predicate predicate) { List values = nodes.values().stream().filter(predicate).collect(Collectors.toList()); + ensureOpen(); if (values.isEmpty() == false) { return randomFrom(random, values); } @@ -819,10 +824,17 @@ private static String getRoleSuffix(Settings settings) { } @Override - public synchronized Client client() { - ensureOpen(); + public Client client() { /* Randomly return a client to one of the nodes in the cluster */ - return getOrBuildRandomNode().client(); + NodeAndClient c = getRandomNodeAndClient(); + ensureOpen(); + if (c == null) { + synchronized (this) { + return getOrBuildRandomNode().client(); + } + } else { + return c.client(); + } } /** @@ -923,7 +935,7 @@ public synchronized void close() throws 
IOException { private final class NodeAndClient implements Closeable { private MockNode node; private final Settings originalNodeSettings; - private Client nodeClient; + private volatile Client nodeClient; private final AtomicBoolean closed = new AtomicBoolean(false); private final String name; private final int nodeAndClientId; @@ -937,10 +949,14 @@ private final class NodeAndClient implements Closeable { } Node node() { + ensureNotClosed(); + return node; + } + + private void ensureNotClosed() { if (closed.get()) { throw new RuntimeException("already closed"); } - return node; } public int nodeAndClientId() { @@ -961,21 +977,21 @@ Client client() { // TODO: collapse these together? Client nodeClient() { - if (closed.get()) { - throw new RuntimeException("already closed"); - } return getOrBuildNodeClient(); } private Client getOrBuildNodeClient() { + var n = nodeClient; + if (n != null) { + ensureNotClosed(); + return n; + } synchronized (InternalTestCluster.this) { - if (closed.get()) { - throw new RuntimeException("already closed"); - } + ensureNotClosed(); if (nodeClient == null) { - nodeClient = node.client(); + nodeClient = clientWrapper.apply(node.client()); } - return clientWrapper.apply(nodeClient); + return nodeClient; } } From 07b56e938189fe27a379706ad91b5c3b13f900ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Wed, 4 Sep 2024 10:05:30 +0200 Subject: [PATCH 026/115] Ensure _termStats is supported with function_score (#112349) --- .../191_term_statistics_function_score.yml | 680 ++++++++++++++++++ .../search/function/FunctionScoreQuery.java | 12 + .../search/function/ScriptScoreFunction.java | 19 + 3 files changed, 711 insertions(+) create mode 100644 modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml 
b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml new file mode 100644 index 0000000000000..de4d6530f4a92 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml @@ -0,0 +1,680 @@ +setup: + - requires: + cluster_features: ["script.term_stats"] + reason: "support for term stats has been added in 8.16" + + - do: + indices.create: + index: test-index + body: + settings: + number_of_shards: "2" + mappings: + properties: + title: + type: text + genre: + type: text + fields: + keyword: + type: keyword + + - do: + index: { refresh: true, index: test-index, id: "1", routing: 0, body: {"title": "Star wars", "genre": "Sci-fi"} } + - do: + index: { refresh: true, index: test-index, id: "2", routing: 1, body: {"title": "Star trek", "genre": "Sci-fi"} } + - do: + index: { refresh: true, index: test-index, id: "3", routing: 1, body: {"title": "Rambo", "genre": "War movie"} } + - do: + index: { refresh: true, index: test-index, id: "4", routing: 1, body: {"title": "Rambo II", "genre": "War movie"} } + +--- +"match query: uniqueTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"match query: uniqueTermsCount with DFS": + - do: + search: + search_type: dfs_query_then_fetch + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"match 
query: matchedTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: matchedTermsCount with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: docFreq min without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 0 } + +--- +"match query: docFreq min with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: docFreq max without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { 
hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: docFreq max with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"match query: totalTermFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: totalTermFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 3 } + - match: { hits.hits.1._score: 3 } + +--- +"match query: termFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: termFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + 
script_score: + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: termPositions avg without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1.5 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: termPositions avg with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { match: { "title": "Star wars" } } + script_score: + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1.5 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: uniqueTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: uniqueTermsCount with DFS": + - do: + search: + search_type: dfs_query_then_fetch + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: matchedTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: 
test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: matchedTermsCount with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: docFreq min without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: docFreq min with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"term query: docFreq max without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: docFreq 
max with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"term query: totalTermFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: totalTermFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"term query: termFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: termFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.termFreq().getSum()" + - 
match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: termPositions avg without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + +--- +"term query: termPositions avg with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + function_score: + boost_mode: replace + query: { term: { "genre.keyword": "Sci-fi" } } + script_score: + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + +--- +"Complex bool query: uniqueTermsCount": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: + bool: + must: + match: { "title": "star wars" } + should: + term: { "genre.keyword": "Sci-fi" } + filter: + match: { "genre" : "sci"} + must_not: + term: { "genre.keyword": "War" } + script_score: + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 4 } + - match: { hits.hits.1._score: 4 } + + +--- +"match_all query: uniqueTermsCount": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: + match_all: {} + script_score: + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: docFreq": + - 
do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: + match_all: {} + script_score: + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: totalTermFreq": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: + match_all: {} + script_score: + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: termFreq": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: + match_all: {} + script_score: + script: + source: "return _termStats.termFreq().getMax()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: termPositions": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + function_score: + boost_mode: replace + query: + match_all: {} + script_score: + script: + source: "return _termStats.termPositions().getSum()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index aed11297d4285..c47d0154fe048 100644 --- 
a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FilterScorer; @@ -25,14 +26,17 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.script.ScriptTermStats; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Objects; +import java.util.Set; /** * A query that allows for a pluggable boost function / filter. If it matches @@ -241,6 +245,14 @@ public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.Scor 1f ); } + if (functions[i] instanceof ScriptScoreFunction scriptScoreFunction && scriptScoreFunction.needsTermStats()) { + subQueryScoreMode = org.apache.lucene.search.ScoreMode.COMPLETE; + // We collect the different terms used in the child query. 
+ final Set terms = new HashSet<>(); + this.visit(QueryVisitor.termCollector(terms)); + scriptScoreFunction.setTermStatsFactory((ctx, docIdSupplier) -> new ScriptTermStats(searcher, ctx, docIdSupplier, terms)); + + } } Weight subQueryWeight = subQuery.createWeight(searcher, subQueryScoreMode, boost); return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryScoreMode.needsScores()); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 6b8a75337b8ee..70233ed8ead6d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -15,10 +15,13 @@ import org.elasticsearch.script.ExplainableScoreScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptTermStats; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Objects; +import java.util.function.BiFunction; +import java.util.function.IntSupplier; public class ScriptScoreFunction extends ScoreFunction { @@ -45,6 +48,8 @@ public float score() { private final int shardId; private final String indexName; + private BiFunction termStatsFactory; + public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script, SearchLookup lookup, String indexName, int shardId) { super(CombineFunction.REPLACE); this.sScript = sScript; @@ -61,6 +66,12 @@ public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOEx leafScript.setScorer(scorer); leafScript._setIndexName(indexName); leafScript._setShard(shardId); + + if (script.needs_termStats()) { + assert termStatsFactory != null; + leafScript._setTermStats(termStatsFactory.apply(ctx, scorer::docID)); + } + return new 
LeafScoreFunction() { private double score(int docId, float subQueryScore, ScoreScript.ExplanationHolder holder) throws IOException { @@ -111,6 +122,14 @@ public boolean needsScores() { return script.needs_score(); } + public boolean needsTermStats() { + return script.needs_termStats(); + } + + public void setTermStatsFactory(BiFunction termStatsFactory) { + this.termStatsFactory = termStatsFactory; + } + @Override public String toString() { return "script" + sScript.toString(); From 2fde9106188c9a348311592313f06f196feb2a30 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 4 Sep 2024 09:14:28 +0100 Subject: [PATCH 027/115] Deprecate Version.fromString (#112470) Version.fromString should not be used further, and existing uses (especially in production code) need to be reduced. Mark it deprecated to dissuade further use of this method. --- server/src/main/java/org/elasticsearch/Version.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 333669ca8079c..b751daf0e2d98 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -270,7 +270,9 @@ public static Version max(Version version1, Version version2) { /** * Returns the version given its string representation, current version if the argument is null or empty + * @deprecated Use of semantic release versions should be minimized; please avoid use of this method if possible. */ + @Deprecated public static Version fromString(String version) { if (Strings.hasLength(version) == false) { return Version.CURRENT; From 9b96665f5d6380702e98ed949acc0c07844faa15 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 4 Sep 2024 10:21:48 +0200 Subject: [PATCH 028/115] ESQL: Fix synthetic attribute pruning (#111413) - Fix ProjectAwayColumns to handle synthetic attributes and mark synthetically introduced Aliases as synthetic again. 
- Fix QueryPlan.references() - Simplify ProjectAwayColumns. - Simplify DependencyConsistency. - Make AggregateExec track the intermediate attributes it actually outputs/requries. --- docs/changelog/111413.yaml | 6 + .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/esql/core/expression/Attribute.java | 11 +- .../esql/core/expression/FieldAttribute.java | 48 ++-- .../esql/core/expression/NamedExpression.java | 4 + .../expression/FieldAttributeTestUtils.java | 27 ++ .../compute/aggregation/AggregatorMode.java | 13 +- .../xpack/esql/analysis/Analyzer.java | 32 ++- .../optimizer/LocalPhysicalPlanOptimizer.java | 5 + .../esql/optimizer/LogicalPlanOptimizer.java | 13 +- .../xpack/esql/optimizer/LogicalVerifier.java | 4 +- .../xpack/esql/optimizer/OptimizerRules.java | 123 +-------- .../esql/optimizer/PhysicalPlanOptimizer.java | 88 ++----- .../esql/optimizer/PhysicalVerifier.java | 4 +- .../ReplaceOrderByExpressionWithEval.java | 2 +- .../xpack/esql/plan/QueryPlan.java | 16 +- .../xpack/esql/plan/logical/Aggregate.java | 16 +- .../xpack/esql/plan/logical/Drop.java | 1 - .../xpack/esql/plan/logical/Enrich.java | 5 + .../xpack/esql/plan/logical/Eval.java | 12 + .../xpack/esql/plan/logical/LeafPlan.java | 6 + .../xpack/esql/plan/logical/Lookup.java | 5 + .../xpack/esql/plan/logical/MvExpand.java | 5 + .../xpack/esql/plan/logical/RegexExtract.java | 6 + .../esql/plan/logical/UnresolvedRelation.java | 13 - .../xpack/esql/plan/logical/join/Join.java | 5 + .../esql/plan/physical/AggregateExec.java | 90 +++++-- .../xpack/esql/plan/physical/EnrichExec.java | 6 + .../xpack/esql/plan/physical/EvalExec.java | 7 + .../esql/plan/physical/FieldExtractExec.java | 11 + .../esql/plan/physical/HashJoinExec.java | 6 + .../xpack/esql/plan/physical/LeafExec.java | 6 + .../esql/plan/physical/MvExpandExec.java | 6 + .../xpack/esql/plan/physical/OutputExec.java | 6 + .../esql/plan/physical/RegexExtractExec.java | 6 + .../AbstractPhysicalOperationProviders.java | 26 +- 
.../esql/planner/LocalExecutionPlanner.java | 3 +- .../xpack/esql/planner/Mapper.java | 36 ++- .../xpack/esql/planner/PlannerUtils.java | 3 +- .../xpack/esql/session/EsqlSession.java | 15 +- .../function/FieldAttributeTests.java | 6 +- .../esql/io/stream/PlanNamedTypesTests.java | 3 +- .../LocalLogicalPlanOptimizerTests.java | 5 + .../LocalPhysicalPlanOptimizerTests.java | 2 +- .../optimizer/PhysicalPlanOptimizerTests.java | 242 ++++++++++++++++-- .../xpack/esql/plan/logical/PhasedTests.java | 6 + .../AggregateExecSerializationTests.java | 19 +- .../session/IndexResolverFieldNamesTests.java | 20 ++ 48 files changed, 656 insertions(+), 345 deletions(-) create mode 100644 docs/changelog/111413.yaml create mode 100644 x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/FieldAttributeTestUtils.java diff --git a/docs/changelog/111413.yaml b/docs/changelog/111413.yaml new file mode 100644 index 0000000000000..0eae45b17d0c4 --- /dev/null +++ b/docs/changelog/111413.yaml @@ -0,0 +1,6 @@ +pr: 111413 +summary: "ESQL: Fix synthetic attribute pruning" +area: ES|QL +type: bug +issues: + - 105821 diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5d1fb3a017f2f..6640b8b5eac8f 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -205,6 +205,7 @@ static TransportVersion def(int id) { public static final TransportVersion GET_DATA_STREAMS_VERBOSE = def(8_735_00_0); public static final TransportVersion ESQL_ADD_INDEX_MODE_CONCRETE_INDICES = def(8_736_00_0); public static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0); + public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java index 3dda28a8abf9d..05c414298fd33 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java @@ -28,6 +28,11 @@ * The rest are not as they are not part of the projection and thus are not part of the derived table. */ public abstract class Attribute extends NamedExpression { + /** + * Changing this will break bwc with 8.15, see {@link FieldAttribute#fieldName()}. + */ + protected static final String SYNTHETIC_ATTRIBUTE_NAME_PREFIX = "$$"; + public static List getNamedWriteables() { // TODO add UnsupportedAttribute when these are moved to the same project return List.of(FieldAttribute.ENTRY, MetadataAttribute.ENTRY, ReferenceAttribute.ENTRY); @@ -49,6 +54,10 @@ public Attribute(Source source, String name, Nullability nullability, NameId id, this.nullability = nullability; } + public static String rawTemporaryName(String inner, String outer, String suffix) { + return SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; + } + @Override public final Expression replaceChildren(List newChildren) { throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); @@ -123,7 +132,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return name() + "{" + label() + "}" + "#" + id(); + return name() + "{" + label() + (synthetic() ? 
"$" : "") + "}" + "#" + id(); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index 37f2cf863d53e..b5d44d98f476e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -30,9 +30,6 @@ * - nestedParent - if nested, what's the parent (which might not be the immediate one) */ public class FieldAttribute extends TypedAttribute { - // TODO: This constant should not be used if possible; use .synthetic() - // https://github.com/elastic/elasticsearch/issues/105821 - public static final String SYNTHETIC_ATTRIBUTE_NAME_PREFIX = "$$"; static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, @@ -52,6 +49,10 @@ public FieldAttribute(Source source, FieldAttribute parent, String name, EsField this(source, parent, name, field, Nullability.TRUE, null, false); } + public FieldAttribute(Source source, FieldAttribute parent, String name, EsField field, boolean synthetic) { + this(source, parent, name, field, Nullability.TRUE, null, synthetic); + } + public FieldAttribute( Source source, FieldAttribute parent, @@ -64,7 +65,11 @@ public FieldAttribute( this(source, parent, name, field.getDataType(), field, nullability, id, synthetic); } - public FieldAttribute( + /** + * Used only for testing. Do not use this otherwise, as an explicitly set type will be ignored the next time this FieldAttribute is + * {@link FieldAttribute#clone}d. 
+ */ + FieldAttribute( Source source, FieldAttribute parent, String name, @@ -147,28 +152,7 @@ public String getWriteableName() { @Override protected NodeInfo info() { - return NodeInfo.create( - this, - (source, parent1, name, type, field1, qualifier, nullability, id, synthetic) -> new FieldAttribute( - source, - parent1, - name, - type, - field1, - qualifier, - nullability, - id, - synthetic - ), - parent, - name(), - dataType(), - field, - (String) null, - nullable(), - id(), - synthetic() - ); + return NodeInfo.create(this, FieldAttribute::new, parent, name(), dataType(), field, (String) null, nullable(), id(), synthetic()); } public FieldAttribute parent() { @@ -185,9 +169,9 @@ public String path() { public String fieldName() { // Before 8.15, the field name was the same as the attribute's name. // On later versions, the attribute can be renamed when creating synthetic attributes. - // TODO: We should use synthetic() to check for that case. - // https://github.com/elastic/elasticsearch/issues/105821 - if (name().startsWith(SYNTHETIC_ATTRIBUTE_NAME_PREFIX) == false) { + // Because until 8.15, we couldn't set `synthetic` to true due to a bug, in that version such FieldAttributes are marked by their + // name starting with `$$`. + if ((synthetic() || name().startsWith(SYNTHETIC_ATTRIBUTE_NAME_PREFIX)) == false) { return name(); } return Strings.hasText(path) ? path + "." + field.getName() : field.getName(); @@ -211,9 +195,15 @@ private FieldAttribute innerField(EsField type) { @Override protected Attribute clone(Source source, String name, DataType type, Nullability nullability, NameId id, boolean synthetic) { + // Ignore `type`, this must be the same as the field's type. 
return new FieldAttribute(source, parent, name, field, nullability, id, synthetic); } + @Override + public Attribute withDataType(DataType type) { + throw new UnsupportedOperationException("FieldAttribute obtains its type from the contained EsField."); + } + @Override public int hashCode() { return Objects.hash(super.hashCode(), path, field); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java index 266ad8e2bb051..ba467910bed0d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java @@ -51,6 +51,10 @@ public NameId id() { return id; } + /** + * Synthetic named expressions are not user defined and usually created during optimizations and substitutions, e.g. when turning + * {@code ... | STATS x = avg(2*field)} into {@code ... | EVAL $$synth$attribute = 2*field | STATS x = avg($$synth$attribute)}. + */ public boolean synthetic() { return synthetic; } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/FieldAttributeTestUtils.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/FieldAttributeTestUtils.java new file mode 100644 index 0000000000000..1662b7f973c9d --- /dev/null +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/FieldAttributeTestUtils.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.core.expression; + +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; + +public class FieldAttributeTestUtils { + public static final FieldAttribute newFieldAttributeWithType( + Source source, + FieldAttribute parent, + String name, + DataType type, + EsField field, + Nullability nullability, + NameId id, + boolean synthetic + ) { + return new FieldAttribute(source, parent, name, type, field, nullability, id, synthetic); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorMode.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorMode.java index 22b20a445c196..706dcd02ed1ce 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorMode.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorMode.java @@ -9,13 +9,24 @@ public enum AggregatorMode { + /** + * Maps raw inputs to intermediate outputs. + */ INITIAL(false, true), + /** + * Maps intermediate inputs to intermediate outputs. + */ INTERMEDIATE(true, true), + /** + * Maps intermediate inputs to final outputs. + */ FINAL(true, false), - // most useful for testing + /** + * Maps raw inputs to final outputs. Most useful for testing. 
+ */ SINGLE(false, false); private final boolean inputPartial; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 664c9bffb6499..4f9ef3df29a85 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; @@ -65,7 +64,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.index.EsIndex; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -1249,12 +1247,10 @@ private Expression createIfDoesNotAlreadyExist( List unionFieldAttributes ) { // Generate new ID for the field and suffix it with the data type to maintain unique attribute names. 
- String unionTypedFieldName = LogicalPlanOptimizer.rawTemporaryName( - fa.name(), - "converted_to", - resolvedField.getDataType().typeName() - ); - FieldAttribute unionFieldAttribute = new FieldAttribute(fa.source(), fa.parent(), unionTypedFieldName, resolvedField); + // NOTE: The name has to start with $$ to not break bwc with 8.15 - in that version, this is how we had to mark this as + // synthetic to work around a bug. + String unionTypedFieldName = Attribute.rawTemporaryName(fa.name(), "converted_to", resolvedField.getDataType().typeName()); + FieldAttribute unionFieldAttribute = new FieldAttribute(fa.source(), fa.parent(), unionTypedFieldName, resolvedField, true); int existingIndex = unionFieldAttributes.indexOf(unionFieldAttribute); if (existingIndex >= 0) { // Do not generate multiple name/type combinations with different IDs @@ -1280,8 +1276,16 @@ private MultiTypeEsField resolvedMultiTypeEsField(FieldAttribute fa, HashMap newOutput = new ArrayList<>(output.size()); for (Attribute attr : output) { - // TODO: this should really use .synthetic() - // https://github.com/elastic/elasticsearch/issues/105821 - if (attr.name().startsWith(FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX) == false) { - newOutput.add(attr); + // Do not let the synthetic union type field attributes end up in the final output. + if (attr.synthetic() && attr instanceof FieldAttribute) { + continue; } + newOutput.add(attr); } return newOutput.size() == output.size() ? 
plan : new Project(Source.EMPTY, plan, newOutput); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index ccab93df187fe..dad8973919e10 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -552,6 +552,10 @@ private Tuple, List> pushableStats(AggregateExec aggregate singletonList(agg), emptyList() ); + // TODO: the attributes have been recreated here; they will have wrong name ids, and the dependency check will + // probably fail when we fix https://github.com/elastic/elasticsearch/issues/105436. + // We may need to refactor AbstractPhysicalOperationProviders.intermediateAttributes so it doesn't return just + // a list of attributes, but a mapping from the logical to the physical attributes. 
tuple.v1().addAll(intermediateAttributes); tuple.v2().add(stat); } @@ -604,6 +608,7 @@ && allowedForDocValues(fieldAttribute, agg, foundAttributes)) { agg.groupings(), orderedAggregates, agg.getMode(), + agg.intermediateAttributes(), agg.estimatedRowSize() ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 282f46e0de7bb..5fcd0e00d866a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; @@ -123,15 +122,11 @@ public LogicalPlanOptimizer(LogicalOptimizerContext optimizerContext) { public static String temporaryName(Expression inner, Expression outer, int suffix) { String in = toString(inner); String out = toString(outer); - return rawTemporaryName(in, out, String.valueOf(suffix)); + return Attribute.rawTemporaryName(in, out, String.valueOf(suffix)); } public static String locallyUniqueTemporaryName(String inner, String outer) { - return FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + new NameId(); - } - - public static String rawTemporaryName(String inner, String outer, String suffix) { - return FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; + return Attribute.rawTemporaryName(inner, outer, (new 
NameId()).toString()); } static String toString(Expression ex) { @@ -373,9 +368,7 @@ private static AttributeReplacement renameAttributesInExpressions( if (attributeNamesToRename.contains(attr.name())) { Alias renamedAttribute = aliasesForReplacedAttributes.computeIfAbsent(attr, a -> { String tempName = locallyUniqueTemporaryName(a.name(), "temp_name"); - // TODO: this should be synthetic - // blocked on https://github.com/elastic/elasticsearch/issues/98703 - return new Alias(a.source(), tempName, a, null, false); + return new Alias(a.source(), tempName, a, null, true); }); return renamedAttribute.toAttribute(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index cd61b4eb8892c..e95959d38f328 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -9,12 +9,12 @@ import org.elasticsearch.xpack.esql.capabilities.Validatable; import org.elasticsearch.xpack.esql.common.Failures; -import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.LogicalPlanDependencyCheck; +import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.DependencyConsistency; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; public final class LogicalVerifier { - private static final LogicalPlanDependencyCheck DEPENDENCY_CHECK = new LogicalPlanDependencyCheck(); + private static final DependencyConsistency DEPENDENCY_CHECK = new DependencyConsistency<>(); public static final LogicalVerifier INSTANCE = new LogicalVerifier(); private LogicalVerifier() {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 
733fe2e8762bb..7808bcbd86545 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -10,33 +10,8 @@ import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NameId; -import org.elasticsearch.xpack.esql.plan.GeneratingPlan; import org.elasticsearch.xpack.esql.plan.QueryPlan; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.Row; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; -import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; -import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; -import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; -import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; -import org.elasticsearch.xpack.esql.plan.physical.EvalExec; -import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; -import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; -import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import 
org.elasticsearch.xpack.esql.plan.physical.RegexExtractExec; -import org.elasticsearch.xpack.esql.plan.physical.RowExec; -import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import java.util.HashSet; import java.util.Set; @@ -50,10 +25,11 @@ private OptimizerRules() {} static class DependencyConsistency

> { void checkPlan(P p, Failures failures) { - AttributeSet refs = references(p); + AttributeSet refs = p.references(); AttributeSet input = p.inputSet(); - AttributeSet generated = generates(p); - AttributeSet missing = refs.subtract(input).subtract(generated); + AttributeSet missing = refs.subtract(input); + // TODO: for Joins, we should probably check if the required fields from the left child are actually in the left child, not + // just any child (and analogously for the right child). if (missing.isEmpty() == false) { failures.add(fail(p, "Plan [{}] optimized incorrectly due to missing references {}", p.nodeString(), missing)); } @@ -73,96 +49,5 @@ void checkPlan(P p, Failures failures) { } } } - - protected AttributeSet references(P p) { - return p.references(); - } - - protected AttributeSet generates(P p) { - return AttributeSet.EMPTY; - } - } - - static class LogicalPlanDependencyCheck extends DependencyConsistency { - @Override - protected AttributeSet references(LogicalPlan plan) { - if (plan instanceof Enrich enrich) { - // The enrichFields are NamedExpressions, so we compute their references as well when just calling enrich.references(). - // But they are not actually referring to attributes from the input plan - only the match field does. 
- return enrich.matchField().references(); - } - return super.references(plan); - } - - @Override - protected AttributeSet generates(LogicalPlan logicalPlan) { - // source-like operators - if (logicalPlan instanceof EsRelation - || logicalPlan instanceof LocalRelation - || logicalPlan instanceof Row - || logicalPlan instanceof Aggregate - || logicalPlan instanceof InlineStats) { - return logicalPlan.outputSet(); - } - if (logicalPlan instanceof GeneratingPlan generating) { - return new AttributeSet(generating.generatedAttributes()); - } - if (logicalPlan instanceof MvExpand mvExpand) { - return new AttributeSet(mvExpand.expanded()); - } - - return AttributeSet.EMPTY; - } - } - - static class PhysicalPlanDependencyCheck extends DependencyConsistency { - @Override - protected AttributeSet generates(PhysicalPlan physicalPlan) { - // source-like operators - if (physicalPlan instanceof EsSourceExec - || physicalPlan instanceof EsStatsQueryExec - || physicalPlan instanceof EsQueryExec - || physicalPlan instanceof LocalSourceExec - || physicalPlan instanceof RowExec - || physicalPlan instanceof ExchangeExec - || physicalPlan instanceof ExchangeSourceExec - || physicalPlan instanceof AggregateExec - || physicalPlan instanceof ShowExec) { - return physicalPlan.outputSet(); - } - - if (physicalPlan instanceof FieldExtractExec fieldExtractExec) { - return new AttributeSet(fieldExtractExec.attributesToExtract()); - } - if (physicalPlan instanceof EvalExec eval) { - return new AttributeSet(Expressions.asAttributes(eval.fields())); - } - if (physicalPlan instanceof RegexExtractExec extract) { - return new AttributeSet(extract.extractedFields()); - } - if (physicalPlan instanceof MvExpandExec mvExpand) { - return new AttributeSet(mvExpand.expanded()); - } - if (physicalPlan instanceof EnrichExec enrich) { - return new AttributeSet(Expressions.asAttributes(enrich.enrichFields())); - } - - return AttributeSet.EMPTY; - } - - @Override - protected AttributeSet references(PhysicalPlan 
plan) { - if (plan instanceof AggregateExec aggregate) { - if (aggregate.getMode() == AggregateExec.Mode.FINAL) { - // lousy hack - need to generate the intermediate aggs yet the intermediateAggs method keep creating new IDs on each - // call - // in practice, the final aggregate should clearly declare the expected properties not hold on the original ones - // as they no longer apply - return aggregate.inputSet(); - } - } - return plan.references(); - } } - } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java index 2a410d6f386a4..d8d1668c092b0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java @@ -11,12 +11,9 @@ import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.rule.RuleExecutor; @@ -25,15 +22,9 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; -import 
org.elasticsearch.xpack.esql.plan.physical.EnrichExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; -import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; -import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; -import org.elasticsearch.xpack.esql.plan.physical.RegexExtractExec; import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import java.util.ArrayList; @@ -89,53 +80,18 @@ static class ProjectAwayColumns extends Rule { @Override public PhysicalPlan apply(PhysicalPlan plan) { - var projectAll = new Holder<>(TRUE); - var keepCollecting = new Holder<>(TRUE); - var attributes = new AttributeSet(); - var aliases = new AttributeMap(); + Holder keepTraversing = new Holder<>(TRUE); + // Invariant: if we add a projection with these attributes after the current plan node, the plan remains valid + // and the overall output will not change. + Holder requiredAttributes = new Holder<>(plan.outputSet()); - return plan.transformDown(UnaryExec.class, p -> { - // no need for project all - if (p instanceof ProjectExec || p instanceof AggregateExec) { - projectAll.set(FALSE); + // This will require updating should we choose to have non-unary execution plans in the future. 
+ return plan.transformDown(UnaryExec.class, currentPlanNode -> { + if (keepTraversing.get() == false) { + return currentPlanNode; } - if (keepCollecting.get()) { - p.forEachExpression(NamedExpression.class, ne -> { - var attr = ne.toAttribute(); - // filter out attributes declared as aliases before - if (ne instanceof Alias as) { - aliases.put(attr, as.child()); - attributes.remove(attr); - } else { - // skip synthetically added attributes (the ones from AVG), see LogicalPlanOptimizer.SubstituteSurrogates - if (attr.synthetic() == false && aliases.containsKey(attr) == false) { - attributes.add(attr); - } - } - }); - if (p instanceof RegexExtractExec ree) { - attributes.removeAll(ree.extractedFields()); - } - if (p instanceof MvExpandExec mvee) { - attributes.remove(mvee.expanded()); - } - if (p instanceof HashJoinExec join) { - attributes.removeAll(join.addedFields()); - for (Attribute rhs : join.rightFields()) { - if (join.leftFields().stream().anyMatch(x -> x.semanticEquals(rhs)) == false) { - attributes.remove(rhs); - } - } - } - if (p instanceof EnrichExec ee) { - for (NamedExpression enrichField : ee.enrichFields()) { - // TODO: why is this different then the remove above? - attributes.remove(enrichField instanceof Alias a ? a.child() : enrichField); - } - } - } - if (p instanceof ExchangeExec exec) { - keepCollecting.set(FALSE); + if (currentPlanNode instanceof ExchangeExec exec) { + keepTraversing.set(FALSE); var child = exec.child(); // otherwise expect a Fragment if (child instanceof FragmentExec fragmentExec) { @@ -143,8 +99,7 @@ public PhysicalPlan apply(PhysicalPlan plan) { // no need for projection when dealing with aggs if (logicalFragment instanceof Aggregate == false) { - var selectAll = projectAll.get(); - var output = selectAll ? 
exec.child().output() : new ArrayList<>(attributes); + List output = new ArrayList<>(requiredAttributes.get()); // if all the fields are filtered out, it's only the count that matters // however until a proper fix (see https://github.com/elastic/elasticsearch/issues/98703) // add a synthetic field (so it doesn't clash with the user defined one) to return a constant @@ -156,19 +111,22 @@ public PhysicalPlan apply(PhysicalPlan plan) { output = Expressions.asAttributes(fields); } // add a logical projection (let the local replanning remove it if needed) - p = exec.replaceChild( - new FragmentExec( - Source.EMPTY, - new Project(logicalFragment.source(), logicalFragment, output), - fragmentExec.esFilter(), - fragmentExec.estimatedRowSize(), - fragmentExec.reducer() - ) + FragmentExec newChild = new FragmentExec( + Source.EMPTY, + new Project(logicalFragment.source(), logicalFragment, output), + fragmentExec.esFilter(), + fragmentExec.estimatedRowSize(), + fragmentExec.reducer() ); + return new ExchangeExec(exec.source(), output, exec.inBetweenAggs(), newChild); } } + } else { + AttributeSet childOutput = currentPlanNode.inputSet(); + AttributeSet addedAttributes = currentPlanNode.outputSet().subtract(childOutput); + requiredAttributes.set(requiredAttributes.get().subtract(addedAttributes).combine(currentPlanNode.references())); } - return p; + return currentPlanNode; }); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java index 7843464650e37..f9ce83d5c1f15 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java @@ -10,7 +10,7 @@ import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Attribute; import 
org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.PhysicalPlanDependencyCheck; +import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.DependencyConsistency; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -24,7 +24,7 @@ public final class PhysicalVerifier { public static final PhysicalVerifier INSTANCE = new PhysicalVerifier(); - private static final PhysicalPlanDependencyCheck DEPENDENCY_CHECK = new PhysicalPlanDependencyCheck(); + private static final DependencyConsistency DEPENDENCY_CHECK = new DependencyConsistency<>(); private PhysicalVerifier() {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java index 3886982a16aba..9bb28f38ea65f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java @@ -18,7 +18,7 @@ import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.rawTemporaryName; +import static org.elasticsearch.xpack.esql.core.expression.Attribute.rawTemporaryName; public final class ReplaceOrderByExpressionWithEval extends OptimizerRules.OptimizerRule { private static int counter = 0; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java index 3ee56771bc3f7..e34e0b8e27863 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java @@ -66,15 +66,27 @@ public List expressions() { } /** - * Returns the expressions referenced on this query plan node. + * The attributes required to be in the {@link QueryPlan#inputSet()} for this plan to be valid. + * Excludes generated references. + *

+ * E.g. for {@code EVAL x = 2*some_field, y = 2*x} this includes {@code some_field} but neither {@code x} nor {@code y}. + * For {@code ENRICH some_policy ON field WITH some_enrich_field} this includes {@code field} but excludes the generated reference + * {@code some_enrich_field}. */ public AttributeSet references() { if (lazyReferences == null) { - lazyReferences = Expressions.references(expressions()); + lazyReferences = computeReferences(); } return lazyReferences; } + /** + * This very likely needs to be overridden for {@link QueryPlan#references} to be correct when inheriting. + */ + protected AttributeSet computeReferences() { + return Expressions.references(expressions()); + } + // // pass Object.class as a type token to pick Collections of expressions not just expressions // diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index 7ed2d04400be1..8445c8236c45a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; @@ -140,11 +141,24 @@ public boolean expressionsResolved() { @Override public List output() { if (lazyOutput == null) { - lazyOutput = mergeOutputAttributes(Expressions.asAttributes(aggregates()), emptyList()); + lazyOutput = output(aggregates); } return lazyOutput; } + public static List output(List 
aggregates) { + return mergeOutputAttributes(Expressions.asAttributes(aggregates), emptyList()); + } + + @Override + protected AttributeSet computeReferences() { + return computeReferences(aggregates, groupings); + } + + public static AttributeSet computeReferences(List aggregates, List groupings) { + return Expressions.references(groupings).combine(Expressions.references(aggregates)); + } + @Override public int hashCode() { return Objects.hash(aggregateType, groupings, aggregates, child()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java index b32139c18e08e..add5a2d576c00 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java @@ -38,7 +38,6 @@ public List removals() { return removals; } - @Override public String commandName() { return "DROP"; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java index 762d5ffcc4532..fcb5d9dbd61cf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NameId; @@ -192,6 +193,10 @@ public Mode mode() { } @Override + protected AttributeSet 
computeReferences() { + return matchField.references(); + } + public String commandName() { return "ENRICH"; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java index 6b217a7a81541..b24a87d365020 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -69,6 +71,16 @@ public List output() { return lazyOutput; } + @Override + protected AttributeSet computeReferences() { + return computeReferences(fields); + } + + public static AttributeSet computeReferences(List fields) { + AttributeSet generated = new AttributeSet(asAttributes(fields)); + return Expressions.references(fields).subtract(generated); + } + @Override public List generatedAttributes() { return asAttributes(fields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java index d21b61a81cd9e..008dcbb475546 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.plan.logical; +import 
org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Collections; @@ -17,6 +18,11 @@ protected LeafPlan(Source source) { super(source, Collections.emptyList()); } + @Override + protected AttributeSet computeReferences() { + return AttributeSet.EMPTY; + } + @Override public final LogicalPlan replaceChildren(List newChildren) { throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java index 141d1a0945ddd..d6ab24fe44c99 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -112,6 +113,10 @@ public JoinConfig joinConfig() { } @Override + protected AttributeSet computeReferences() { + return new AttributeSet(matchFields); + } + public String commandName() { return "LOOKUP"; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java index 8519ca0350b6e..46ebc43d698a6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java @@ -11,6 +11,7 
@@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -78,6 +79,10 @@ public Attribute expanded() { } @Override + protected AttributeSet computeReferences() { + return target.references(); + } + public String commandName() { return "MV_EXPAND"; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java index f95bee92d4e1a..ab813f1a9c737 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -39,6 +40,11 @@ public List output() { return mergeOutputAttributes(extractedFields, child().output()); } + @Override + protected AttributeSet computeReferences() { + return input.references(); + } + public Expression input() { return input; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java index fdc2321c8bef6..0dfbe4936e4e3 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java @@ -10,9 +10,6 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; -import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.plan.TableIdentifier; @@ -121,16 +118,6 @@ public String unresolvedMessage() { return unresolvedMsg; } - @Override - public AttributeSet references() { - AttributeSet refs = super.references(); - if (indexMode == IndexMode.TIME_SERIES) { - refs = new AttributeSet(refs); - refs.add(new UnresolvedAttribute(source(), MetadataAttribute.TIMESTAMP_FIELD)); - } - return refs; - } - @Override public int hashCode() { return Objects.hash(source(), table, metadataFields, indexMode, unresolvedMsg); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index b9004e3758c9b..e920028f04cb9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -76,6 +76,11 @@ public JoinConfig config() { return config; } + @Override + protected AttributeSet computeReferences() { + return Expressions.references(config.leftFields()).combine(Expressions.references(config.rightFields())); + } + @Override protected NodeInfo info() { // Do not just add the JoinConfig as a whole - 
this would prevent correctly registering the diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java index 42fb0ab0bdf3e..f003abca7d1da 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java @@ -7,17 +7,20 @@ package org.elasticsearch.xpack.esql.plan.physical; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import java.io.IOException; import java.util.List; @@ -32,8 +35,13 @@ public class AggregateExec extends UnaryExec implements EstimatesRowSize { private final List groupings; private final List aggregates; + /** + * The output attributes of {@link AggregatorMode#INITIAL} and {@link AggregatorMode#INTERMEDIATE} aggregations, resp. + * the input attributes of {@link AggregatorMode#FINAL} and {@link AggregatorMode#INTERMEDIATE} aggregations. 
+ */ + private final List intermediateAttributes; - private final Mode mode; + private final AggregatorMode mode; /** * Estimate of the number of bytes that'll be loaded per position before @@ -41,34 +49,33 @@ public class AggregateExec extends UnaryExec implements EstimatesRowSize { */ private final Integer estimatedRowSize; - public enum Mode { - SINGLE, - PARTIAL, // maps raw inputs to intermediate outputs - FINAL, // maps intermediate inputs to final outputs - } - public AggregateExec( Source source, PhysicalPlan child, List groupings, List aggregates, - Mode mode, + AggregatorMode mode, + List intermediateAttributes, Integer estimatedRowSize ) { super(source, child); this.groupings = groupings; this.aggregates = aggregates; this.mode = mode; + this.intermediateAttributes = intermediateAttributes; this.estimatedRowSize = estimatedRowSize; } private AggregateExec(StreamInput in) throws IOException { + // This is only deserialized as part of node level reduction, which is turned off until at least 8.16. + // So, we do not have to consider previous transport versions here, because old nodes will not send AggregateExecs to new nodes. 
this( Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readPhysicalPlanNode(), in.readNamedWriteableCollectionAsList(Expression.class), in.readNamedWriteableCollectionAsList(NamedExpression.class), - in.readEnum(AggregateExec.Mode.class), + in.readEnum(AggregatorMode.class), + in.readNamedWriteableCollectionAsList(Attribute.class), in.readOptionalVInt() ); } @@ -79,7 +86,12 @@ public void writeTo(StreamOutput out) throws IOException { ((PlanStreamOutput) out).writePhysicalPlanNode(child()); out.writeNamedWriteableCollection(groupings()); out.writeNamedWriteableCollection(aggregates()); - out.writeEnum(getMode()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS)) { + out.writeEnum(getMode()); + out.writeNamedWriteableCollection(intermediateAttributes()); + } else { + out.writeEnum(AggregateExec.Mode.fromAggregatorMode(getMode())); + } out.writeOptionalVInt(estimatedRowSize()); } @@ -90,12 +102,12 @@ public String getWriteableName() { @Override protected NodeInfo info() { - return NodeInfo.create(this, AggregateExec::new, child(), groupings, aggregates, mode, estimatedRowSize); + return NodeInfo.create(this, AggregateExec::new, child(), groupings, aggregates, mode, intermediateAttributes, estimatedRowSize); } @Override public AggregateExec replaceChild(PhysicalPlan newChild) { - return new AggregateExec(source(), newChild, groupings, aggregates, mode, estimatedRowSize); + return new AggregateExec(source(), newChild, groupings, aggregates, mode, intermediateAttributes, estimatedRowSize); } public List groupings() { @@ -106,8 +118,8 @@ public List aggregates() { return aggregates; } - public AggregateExec withMode(Mode newMode) { - return new AggregateExec(source(), child(), groupings, aggregates, newMode, estimatedRowSize); + public AggregateExec withMode(AggregatorMode newMode) { + return new AggregateExec(source(), child(), groupings, aggregates, newMode, intermediateAttributes, 
estimatedRowSize); } /** @@ -122,21 +134,60 @@ public Integer estimatedRowSize() { public PhysicalPlan estimateRowSize(State state) { state.add(false, aggregates); // The groupings are contained within the aggregates int size = state.consumeAllFields(true); - return Objects.equals(this.estimatedRowSize, size) ? this : new AggregateExec(source(), child(), groupings, aggregates, mode, size); + return Objects.equals(this.estimatedRowSize, size) + ? this + : new AggregateExec(source(), child(), groupings, aggregates, mode, intermediateAttributes, size); } - public Mode getMode() { + public AggregatorMode getMode() { return mode; } + /** + * Used only for bwc when de-/serializing. + */ + @Deprecated + private enum Mode { + SINGLE, + PARTIAL, // maps raw inputs to intermediate outputs + FINAL; // maps intermediate inputs to final outputs + + static Mode fromAggregatorMode(AggregatorMode aggregatorMode) { + return switch (aggregatorMode) { + case SINGLE -> SINGLE; + case INITIAL -> PARTIAL; + case FINAL -> FINAL; + // If needed, we could have this return an PARTIAL instead; that's how intermediate aggs were encoded in the past for + // data node level reduction. + case INTERMEDIATE -> throw new UnsupportedOperationException( + "cannot turn intermediate aggregation into single, partial or final." + ); + }; + } + } + + /** + * Aggregations are usually performed in two steps, first partial (e.g. locally on a data node) then final (on the coordinator node). + * These are the intermediate attributes output by a partial aggregation or consumed by a final one. + * C.f. {@link org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders#intermediateAttributes}. + */ + public List intermediateAttributes() { + return intermediateAttributes; + } + @Override public List output() { - return Expressions.asAttributes(aggregates); + return mode.isOutputPartial() ? 
intermediateAttributes : Aggregate.output(aggregates); + } + + @Override + protected AttributeSet computeReferences() { + return mode.isInputPartial() ? new AttributeSet(intermediateAttributes) : Aggregate.computeReferences(aggregates, groupings); } @Override public int hashCode() { - return Objects.hash(groupings, aggregates, mode, estimatedRowSize, child()); + return Objects.hash(groupings, aggregates, mode, intermediateAttributes, estimatedRowSize, child()); } @Override @@ -153,6 +204,7 @@ public boolean equals(Object obj) { return Objects.equals(groupings, other.groupings) && Objects.equals(aggregates, other.aggregates) && Objects.equals(mode, other.mode) + && Objects.equals(intermediateAttributes, other.intermediateAttributes) && Objects.equals(estimatedRowSize, other.estimatedRowSize) && Objects.equals(child(), other.child()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java index bdf1c006f8b17..fc710d92b4f0e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -59,6 +60,11 @@ public EnrichExec( this.enrichFields = enrichFields; } + @Override + protected AttributeSet computeReferences() { + return matchField.references(); + } + @Override protected NodeInfo info() { return NodeInfo.create( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EvalExec.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EvalExec.java index 97b81914f8889..860ba1489f572 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EvalExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EvalExec.java @@ -12,10 +12,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.logical.Eval; import java.io.IOException; import java.util.List; @@ -62,6 +64,11 @@ public List output() { return mergeOutputAttributes(fields, child().output()); } + @Override + protected AttributeSet computeReferences() { + return Eval.computeReferences(fields); + } + @Override public UnaryExec replaceChild(PhysicalPlan newChild) { return new EvalExec(source(), newChild, fields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java index 71ac67e931dd8..d2df97734f195 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import 
org.elasticsearch.xpack.esql.core.tree.NodeUtils; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -48,6 +49,16 @@ public static Attribute extractSourceAttributesFrom(PhysicalPlan plan) { return null; } + @Override + protected AttributeSet computeReferences() { + AttributeSet required = new AttributeSet(docValuesAttributes); + + required.add(sourceAttribute); + required.addAll(attributesToExtract); + + return required; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, FieldExtractExec::new, child(), attributesToExtract, docValuesAttributes); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java index 0415a5cbb9132..b1daef8cb2e5c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -98,6 +99,11 @@ public List output() { return output; } + @Override + protected AttributeSet computeReferences() { + return Expressions.references(leftFields); + } + @Override public HashJoinExec replaceChild(PhysicalPlan newChild) { return new HashJoinExec(source(), newChild, joinData, matchFields, leftFields, rightFields, output); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java index dd8d4e4f1de21..042357b55d7e8 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.plan.physical; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Collections; @@ -18,6 +19,11 @@ protected LeafExec(Source source) { super(source, Collections.emptyList()); } + @Override + protected AttributeSet computeReferences() { + return AttributeSet.EMPTY; + } + @Override public final LeafExec replaceChildren(List newChildren) { throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/MvExpandExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/MvExpandExec.java index ebf7d1aba7b8a..2e7531a880742 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/MvExpandExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/MvExpandExec.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -29,6 +30,11 @@ public MvExpandExec(Source source, PhysicalPlan child, NamedExpression target, A this.output = calculateOutput(child.output(), target, expanded); } + @Override + protected AttributeSet computeReferences() { + return target.references(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, MvExpandExec::new, child(), target, expanded); diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/OutputExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/OutputExec.java index 84f83b00665f3..b4a5608e31dfd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/OutputExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/OutputExec.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -31,6 +32,11 @@ public Consumer getPageConsumer() { return pageConsumer; } + @Override + protected AttributeSet computeReferences() { + return AttributeSet.EMPTY; + } + @Override public UnaryExec replaceChild(PhysicalPlan newChild) { return new OutputExec(source(), newChild, pageConsumer); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RegexExtractExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RegexExtractExec.java index 6bc35fc1bdded..bbc76cad54373 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RegexExtractExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RegexExtractExec.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -32,6 +33,11 @@ public List output() { return mergeOutputAttributes(extractedFields, child().output()); } + @Override + protected AttributeSet computeReferences() { + return inputExpression.references(); + } + 
public Expression inputExpression() { return inputExpression; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java index 3971e79cdc6d9..0e71963e29270 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java @@ -51,23 +51,17 @@ public final PhysicalOperation groupingPhysicalOperation( ) { Layout.Builder layout = new Layout.Builder(); Operator.OperatorFactory operatorFactory = null; - AggregateExec.Mode mode = aggregateExec.getMode(); + AggregatorMode aggregatorMode = aggregateExec.getMode(); var aggregates = aggregateExec.aggregates(); var sourceLayout = source.layout; - AggregatorMode aggregatorMode; - if (mode == AggregateExec.Mode.FINAL) { - aggregatorMode = AggregatorMode.FINAL; - } else if (mode == AggregateExec.Mode.PARTIAL) { - if (aggregateExec.child() instanceof ExchangeSourceExec) {// the reducer step at data node (local) level - aggregatorMode = AggregatorMode.INTERMEDIATE; - } else { - aggregatorMode = AggregatorMode.INITIAL; - } - } else { - assert false : "Invalid aggregator mode [" + mode + "]"; - aggregatorMode = AggregatorMode.SINGLE; + if (aggregatorMode != AggregatorMode.INITIAL && aggregatorMode != AggregatorMode.FINAL) { + assert false : "Invalid aggregator mode [" + aggregatorMode + "]"; + } + if (aggregatorMode == AggregatorMode.INITIAL && aggregateExec.child() instanceof ExchangeSourceExec) { + // the reducer step at data node (local) level + aggregatorMode = AggregatorMode.INTERMEDIATE; } if (aggregateExec.groupings().isEmpty()) { @@ -75,7 +69,7 @@ public final PhysicalOperation groupingPhysicalOperation( List aggregatorFactories = new ArrayList<>(); // append channels to the layout 
- if (mode == AggregateExec.Mode.FINAL) { + if (aggregatorMode == AggregatorMode.FINAL) { layout.append(aggregates); } else { layout.append(aggregateMapper.mapNonGrouping(aggregates)); @@ -122,7 +116,7 @@ public final PhysicalOperation groupingPhysicalOperation( // check if there's any alias used in grouping - no need for the final reduction since the intermediate data // is in the output form // if the group points to an alias declared in the aggregate, use the alias child as source - else if (mode == AggregateExec.Mode.PARTIAL) { + else if (aggregatorMode == AggregatorMode.INITIAL || aggregatorMode == AggregatorMode.INTERMEDIATE) { if (groupAttribute.semanticEquals(a.toAttribute())) { groupAttribute = attr; break; @@ -136,7 +130,7 @@ else if (mode == AggregateExec.Mode.PARTIAL) { groupSpecs.add(new GroupSpec(groupInput == null ? null : groupInput.channel(), groupAttribute)); } - if (mode == AggregateExec.Mode.FINAL) { + if (aggregatorMode == AggregatorMode.FINAL) { for (var agg : aggregates) { if (Alias.unwrap(agg) instanceof AggregateFunction) { layout.append(agg); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 60d76f1a8593a..b28c80211c649 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.Describable; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; @@ -168,7 +169,7 @@ public LocalExecutionPlan plan(PhysicalPlan localPhysicalPlan) { 
// workaround for https://github.com/elastic/elasticsearch/issues/99782 localPhysicalPlan = localPhysicalPlan.transformUp( AggregateExec.class, - a -> a.getMode() == AggregateExec.Mode.FINAL ? new ProjectExec(a.source(), a, Expressions.asAttributes(a.aggregates())) : a + a -> a.getMode() == AggregatorMode.FINAL ? new ProjectExec(a.source(), a, Expressions.asAttributes(a.aggregates())) : a ); PhysicalOperation physicalOperation = plan(localPhysicalPlan, context); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index 299149c6daabc..9613fa1f3fcde 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -8,7 +8,9 @@ package org.elasticsearch.xpack.esql.planner; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; @@ -52,9 +54,7 @@ import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; -import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode; -import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.FINAL; -import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.PARTIAL; +import java.util.List; /** *

This class is part of the planner

@@ -216,9 +216,13 @@ private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) { } private PhysicalPlan map(Aggregate aggregate, PhysicalPlan child) { + List intermediateAttributes = AbstractPhysicalOperationProviders.intermediateAttributes( + aggregate.aggregates(), + aggregate.groupings() + ); // in local mode the only aggregate that can appear is the partial side under an exchange if (localMode) { - child = aggExec(aggregate, child, PARTIAL); + child = aggExec(aggregate, child, AggregatorMode.INITIAL, intermediateAttributes); } // otherwise create both sides of the aggregate (for parallelism purposes), if no fragment is present // TODO: might be easier long term to end up with just one node and split if necessary instead of doing that always at this stage @@ -226,23 +230,35 @@ private PhysicalPlan map(Aggregate aggregate, PhysicalPlan child) { child = addExchangeForFragment(aggregate, child); // exchange was added - use the intermediates for the output if (child instanceof ExchangeExec exchange) { - var output = AbstractPhysicalOperationProviders.intermediateAttributes(aggregate.aggregates(), aggregate.groupings()); - child = new ExchangeExec(child.source(), output, true, exchange.child()); + child = new ExchangeExec(child.source(), intermediateAttributes, true, exchange.child()); } // if no exchange was added, create the partial aggregate else { - child = aggExec(aggregate, child, PARTIAL); + child = aggExec(aggregate, child, AggregatorMode.INITIAL, intermediateAttributes); } // regardless, always add the final agg - child = aggExec(aggregate, child, FINAL); + child = aggExec(aggregate, child, AggregatorMode.FINAL, intermediateAttributes); } return child; } - private static AggregateExec aggExec(Aggregate aggregate, PhysicalPlan child, Mode aggMode) { - return new AggregateExec(aggregate.source(), child, aggregate.groupings(), aggregate.aggregates(), aggMode, null); + private static AggregateExec aggExec( + Aggregate aggregate, + PhysicalPlan child, + 
AggregatorMode aggMode, + List intermediateAttributes + ) { + return new AggregateExec( + aggregate.source(), + child, + aggregate.groupings(), + aggregate.aggregates(), + aggMode, + intermediateAttributes, + null + ); } private PhysicalPlan map(Limit limit, PhysicalPlan child) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 29be49e60ad37..d2568e5f5031c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; @@ -97,7 +98,7 @@ public static PhysicalPlan dataNodeReductionPlan(LogicalPlan plan, PhysicalPlan Mapper mapper = new Mapper(true); var physicalPlan = EstimatesRowSize.estimateRowSize(0, mapper.map(plan)); var aggregate = (AggregateExec) physicalPlan.collectFirstChildren(AggregateExec.class::isInstance).get(0); - return aggregate.withMode(AggregateExec.Mode.PARTIAL); + return aggregate.withMode(AggregatorMode.INITIAL); } else { throw new EsqlIllegalArgumentException("unsupported unary physical plan node [" + pipelineBreaker.nodeName() + "]"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 29a32df8e6239..674f2c3c2ee65 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -27,6 +28,7 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; @@ -50,6 +52,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Phased; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; +import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -292,14 +295,19 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF } // but keep the inputs needed by Grok/Dissect references.addAll(re.input().references()); - } else if (p instanceof Enrich) { - AttributeSet enrichRefs = p.references(); + } else if (p instanceof Enrich enrich) { + AttributeSet enrichRefs = Expressions.references(enrich.enrichFields()); + enrichRefs = enrichRefs.combine(enrich.matchField().references()); // Enrich adds an EmptyAttribute if no match field is specified // The exact name of the field will be added later as part of enrichPolicyMatchFields Set 
enrichRefs.removeIf(attr -> attr instanceof EmptyAttribute); references.addAll(enrichRefs); } else { references.addAll(p.references()); + if (p instanceof UnresolvedRelation ur && ur.indexMode() == IndexMode.TIME_SERIES) { + // METRICS aggs generally rely on @timestamp without the user having to mention it. + references.add(new UnresolvedAttribute(ur.source(), MetadataAttribute.TIMESTAMP_FIELD)); + } // special handling for UnresolvedPattern (which is not an UnresolvedAttribute) p.forEachExpression(UnresolvedNamePattern.class, up -> { var ua = new UnresolvedAttribute(up.source(), up.name()); @@ -317,10 +325,11 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF // remove any already discovered UnresolvedAttributes that are in fact aliases defined later down in the tree // for example "from test | eval x = salary | stats max = max(x) by gender" // remove the UnresolvedAttribute "x", since that is an Alias defined in "eval" + AttributeSet planRefs = Expressions.references(p.expressions()); p.forEachExpressionDown(Alias.class, alias -> { // do not remove the UnresolvedAttribute that has the same name as its alias, ie "rename id = id" // or the UnresolvedAttributes that are used in Functions that have aliases "STATS id = MAX(id)" - if (p.references().names().contains(alias.name())) { + if (planRefs.names().contains(alias.name())) { return; } references.removeIf(attr -> matchByName(attr, alias.name(), keepCommandReferences.contains(attr))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java index 8090a20ddc836..e8f0333791844 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java @@ -15,6 +15,8 @@ import 
org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.type.AbstractEsFieldTypeTests; +import static org.elasticsearch.xpack.esql.core.expression.FieldAttributeTestUtils.newFieldAttributeWithType; + public class FieldAttributeTests extends AbstractAttributeTestCase { public static FieldAttribute createFieldAttribute(int maxDepth, boolean onlyRepresentable) { Source source = Source.EMPTY; @@ -26,7 +28,7 @@ public static FieldAttribute createFieldAttribute(int maxDepth, boolean onlyRepr EsField field = AbstractEsFieldTypeTests.randomAnyEsField(maxDepth); Nullability nullability = randomFrom(Nullability.values()); boolean synthetic = randomBoolean(); - return new FieldAttribute(source, parent, name, type, field, nullability, new NameId(), synthetic); + return newFieldAttributeWithType(source, parent, name, type, field, nullability, new NameId(), synthetic); } @Override @@ -51,6 +53,6 @@ protected FieldAttribute mutate(FieldAttribute instance) { case 4 -> nullability = randomValueOtherThan(nullability, () -> randomFrom(Nullability.values())); case 5 -> synthetic = false == synthetic; } - return new FieldAttribute(source, parent, name, type, field, nullability, new NameId(), synthetic); + return newFieldAttributeWithType(source, parent, name, type, field, nullability, new NameId(), synthetic); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index a3d1e70e558d6..853062676a0dc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -55,6 +55,7 @@ import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; +import static 
org.elasticsearch.xpack.esql.core.expression.FieldAttributeTestUtils.newFieldAttributeWithType; import static org.hamcrest.Matchers.equalTo; public class PlanNamedTypesTests extends ESTestCase { @@ -127,7 +128,7 @@ static FieldAttribute randomFieldAttributeOrNull() { } static FieldAttribute randomFieldAttribute() { - return new FieldAttribute( + return newFieldAttributeWithType( Source.EMPTY, randomFieldAttributeOrNull(), // parent randomAlphaOfLength(randomIntBetween(1, 25)), // name diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index 5c166e92ab152..8ee84daf06802 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; @@ -229,6 +230,10 @@ public MockFieldAttributeCommand(Source source, LogicalPlan child, FieldAttribut } @Override + protected AttributeSet computeReferences() { + return AttributeSet.EMPTY; + } + public void writeTo(StreamOutput out) { throw new UnsupportedOperationException("not serialized"); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 
8a6712869d35e..eeb720084e635 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -69,11 +69,11 @@ import java.util.Map; import static java.util.Arrays.asList; +import static org.elasticsearch.compute.aggregation.AggregatorMode.FINAL; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static org.elasticsearch.xpack.esql.EsqlTestUtils.configuration; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; -import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.FINAL; import static org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 6f489541a7815..32993ca90cd83 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -62,6 +63,8 @@ import 
org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; @@ -124,6 +127,8 @@ import java.util.stream.Collectors; import static java.util.Arrays.asList; +import static org.elasticsearch.compute.aggregation.AggregatorMode.FINAL; +import static org.elasticsearch.compute.aggregation.AggregatorMode.INITIAL; import static org.elasticsearch.core.Tuple.tuple; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; @@ -144,9 +149,8 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.parser.ExpressionBuilder.MAX_EXPRESSION_DEPTH; import static org.elasticsearch.xpack.esql.parser.LogicalPlanBuilder.MAX_QUERY_DEPTH; -import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.FINAL; -import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.PARTIAL; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -554,7 +558,18 @@ public void testExtractorMultiEvalWithDifferentNames() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), 
- contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") + containsInAnyOrder( + "_meta_field", + "emp_no", + "first_name", + "gender", + "job", + "job.raw", + "languages", + "last_name", + "long_noidx", + "salary" + ) ); } @@ -584,7 +599,18 @@ public void testExtractorMultiEvalWithSameName() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") + containsInAnyOrder( + "_meta_field", + "emp_no", + "first_name", + "gender", + "job", + "job.raw", + "languages", + "last_name", + "long_noidx", + "salary" + ) ); } @@ -2072,6 +2098,139 @@ public void testFieldExtractWithoutSourceAttributes() { ); } + public void testProjectAwayColumns() { + var rule = new PhysicalPlanOptimizer.ProjectAwayColumns(); + + // FROM test | limit 10000 + // + // as physical plan: + // + // LimitExec[10000[INTEGER]] + // \_ExchangeExec[[],false] + // \_FragmentExec[filter=null, estimatedRowSize=0, reducer=[], fragment=[<> + // EsRelation[test][some_field1{f}#2, some_field2{f}#3]<>]] + + EsRelation relation = new EsRelation( + Source.EMPTY, + new EsIndex( + "test", + Map.of( + "some_field1", + new EsField("some_field1", DataType.KEYWORD, Map.of(), true), + "some_field2", + new EsField("some_field2", DataType.KEYWORD, Map.of(), true) + ) + ), + IndexMode.STANDARD, + false + ); + Attribute some_field1 = relation.output().get(0); + Attribute some_field2 = relation.output().get(1); + FragmentExec fragment = new FragmentExec(relation); + ExchangeExec exchange = new ExchangeExec(Source.EMPTY, fragment); + LimitExec limitThenFragment = new LimitExec(Source.EMPTY, exchange, new Literal(Source.EMPTY, 10000, DataType.INTEGER)); + + // All the relation's fields are required. 
+ PhysicalPlan plan = rule.apply(limitThenFragment); + Project project = as( + as(as(as(plan, LimitExec.class).child(), ExchangeExec.class).child(), FragmentExec.class).fragment(), + Project.class + ); + assertThat(project.projections(), containsInAnyOrder(some_field1, some_field2)); + + // FROM test | limit 10000 | keep some_field1 + ProjectExec projectLimitThenFragment = new ProjectExec(Source.EMPTY, limitThenFragment, List.of(some_field1)); + plan = rule.apply(projectLimitThenFragment); + project = as( + as(as(as(as(plan, ProjectExec.class).child(), LimitExec.class).child(), ExchangeExec.class).child(), FragmentExec.class) + .fragment(), + Project.class + ); + assertThat(project.projections(), contains(some_field1)); + + // FROM test | limit 10000 | eval x = to_lower(some_field1) + Alias x = new Alias(Source.EMPTY, "x", new ToLower(Source.EMPTY, some_field1, config)); + EvalExec evalLimitThenFragment = new EvalExec(Source.EMPTY, limitThenFragment, List.of(x)); + plan = rule.apply(evalLimitThenFragment); + project = as( + as(as(as(as(plan, EvalExec.class).child(), LimitExec.class).child(), ExchangeExec.class).child(), FragmentExec.class) + .fragment(), + Project.class + ); + assertThat(project.projections(), containsInAnyOrder(some_field1, some_field2)); + + // FROM test | limit 10000 | eval x = to_lower(some_field1) | keep x + ProjectExec projectEvalLimitThenFragment = new ProjectExec(Source.EMPTY, evalLimitThenFragment, List.of(x.toAttribute())); + plan = rule.apply(projectEvalLimitThenFragment); + project = as( + as( + as(as(as(as(plan, ProjectExec.class).child(), EvalExec.class).child(), LimitExec.class).child(), ExchangeExec.class) + .child(), + FragmentExec.class + ).fragment(), + Project.class + ); + assertThat(project.projections(), contains(some_field1)); + + // FROM test | limit 10000 | rename some_field1 as some_field2 + ProjectExec renameLimitThenFragment = new ProjectExec( + Source.EMPTY, + limitThenFragment, + List.of(new Alias(Source.EMPTY, 
some_field2.name(), some_field1)) + ); + plan = rule.apply(renameLimitThenFragment); + project = as( + as(as(as(as(plan, ProjectExec.class).child(), LimitExec.class).child(), ExchangeExec.class).child(), FragmentExec.class) + .fragment(), + Project.class + ); + assertThat(project.projections(), contains(some_field1)); + + // FROM test | limit 10000 | eval x = to_lower(some_field1), y = to_upper(x) | keep y + Alias y = new Alias(Source.EMPTY, "y", new ToUpper(Source.EMPTY, x.toAttribute(), config)); + EvalExec evalTwiceLimitThenFragment = new EvalExec(Source.EMPTY, limitThenFragment, List.of(x, y)); + ProjectExec projectEvalTwiceLimitThenFragment = new ProjectExec(Source.EMPTY, evalTwiceLimitThenFragment, List.of(y.toAttribute())); + plan = rule.apply(projectEvalTwiceLimitThenFragment); + project = as( + as( + as(as(as(as(plan, ProjectExec.class).child(), EvalExec.class).child(), LimitExec.class).child(), ExchangeExec.class) + .child(), + FragmentExec.class + ).fragment(), + Project.class + ); + assertThat(project.projections(), contains(some_field1)); + } + + /** + * Expects + * ProjectExec[[avg(emp_no){r}#3]] + * \_EvalExec[[$$SUM$avg(emp_no)$0{r:s}#14 / $$COUNT$avg(emp_no)$1{r:s}#15 AS avg(emp_no)]] + * \_LimitExec[1000[INTEGER]] + * \_AggregateExec[[],[SUM(emp_no{f}#4) AS $$SUM$avg(emp_no)$0, COUNT(emp_no{f}#4) AS $$COUNT$avg(emp_no)$1],FINAL,[sum{r}#16, seen{ + * r}#17, count{r}#18, seen{r}#19],24] + * \_ExchangeExec[[sum{r}#16, seen{r}#17, count{r}#18, seen{r}#19],true] + * \_AggregateExec[[],[SUM(emp_no{f}#4) AS $$SUM$avg(emp_no)$0, COUNT(emp_no{f}#4) AS $$COUNT$avg(emp_no)$1],INITIAL,[sum{r}#37, + * seen{r}#38, count{r}#39, seen{r}#40],16] + * \_FieldExtractExec[emp_no{f}#4] + * \_EsQueryExec[test], indexMode[standard], query[{"exists":{"field":"emp_no","boost":1.0}}][_doc{f}#41], limit[], sort[] + * estimatedRowSize[8] + */ + public void testProjectAwayColumnsDoesNothingForPipelineBreakingAggs() { + var plan = optimizedPlan(physicalPlan(""" + from test + | 
stats avg(emp_no) + """)); + + ProjectExec project = as(plan, ProjectExec.class); + EvalExec eval = as(project.child(), EvalExec.class); + LimitExec limit = as(eval.child(), LimitExec.class); + AggregateExec finalAgg = as(limit.child(), AggregateExec.class); + ExchangeExec exchange = as(finalAgg.child(), ExchangeExec.class); + // No projection inserted here. + AggregateExec initialAgg = as(exchange.child(), AggregateExec.class); + } + /** * Expects * ProjectExec[[x{r}#3]] @@ -2083,7 +2242,7 @@ public void testFieldExtractWithoutSourceAttributes() { * \_EsQueryExec[test], query[{"esql_single_value":{"field":"emp_no","next":{"range":{"emp_no":{"gt":10,"boost":1.0}}}}}] * [_doc{f}#13], limit[10000], sort[] estimatedRowSize[8] */ - public void testProjectAllFieldsWhenOnlyTheCountMatters() { + public void testProjectAwayAllColumnsWhenOnlyTheCountMatters() { var plan = optimizedPlan(physicalPlan(""" from test | where emp_no > 10 @@ -2101,6 +2260,42 @@ public void testProjectAllFieldsWhenOnlyTheCountMatters() { eval = as(project.child(), EvalExec.class); assertThat(Expressions.names(eval.fields()), contains(nullField)); var source = source(eval.child()); + assertThat(Expressions.names(source.attrs()), contains("_doc")); + } + + /** + * Expects + * + * LimitExec[10000[INTEGER]] + * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS count(*)],FINAL,[count{r}#13, seen{r}#14],8] + * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS count(*)],INITIAL,[count{r}#13, seen{r}#14],8] + * \_LimitExec[10[INTEGER]] + * \_ExchangeExec[[<all-fields-projected>{r:s}#28],false] + * \_ProjectExec[[<all-fields-projected>{r:s}#28]] + * \_EvalExec[[null[NULL] AS <all-fields-projected>]] + * \_EsQueryExec[test], indexMode[standard], query[][_doc{f}#29], limit[10], sort[] estimatedRowSize[4] + */ + public void testProjectAwayAllColumnsWhenOnlyTheCountMattersInStats() { + var plan = optimizedPlan(physicalPlan(""" + from test + | limit 10 + | stats count(*) + """)); + + var limit = as(plan, LimitExec.class); + 
var aggFinal = as(limit.child(), AggregateExec.class); + var aggInitial = as(aggFinal.child(), AggregateExec.class); + var limit10 = as(aggInitial.child(), LimitExec.class); + + var exchange = as(limit10.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var eval = as(project.child(), EvalExec.class); + EsQueryExec esQuery = as(eval.child(), EsQueryExec.class); + + var nullField = ""; + assertThat(Expressions.names(project.projections()), contains(nullField)); + assertThat(Expressions.names(eval.fields()), contains(nullField)); + assertThat(Expressions.names(esQuery.attrs()), contains("_doc")); } /** @@ -2131,7 +2326,7 @@ public void testAvgSurrogateFunctionAfterRenameAndLimit() { var aggFinal = as(limit.child(), AggregateExec.class); assertThat(aggFinal.getMode(), equalTo(FINAL)); var aggPartial = as(aggFinal.child(), AggregateExec.class); - assertThat(aggPartial.getMode(), equalTo(PARTIAL)); + assertThat(aggPartial.getMode(), equalTo(INITIAL)); limit = as(aggPartial.child(), LimitExec.class); assertThat(limit.limit(), instanceOf(Literal.class)); assertThat(limit.limit().fold(), equalTo(10)); @@ -2239,7 +2434,8 @@ public void testGlobalAggFoldingOutput() { var limit = as(optimized, LimitExec.class); var aggFinal = as(limit.child(), AggregateExec.class); var aggPartial = as(aggFinal.child(), AggregateExec.class); - assertThat(Expressions.names(aggPartial.output()), contains("c")); + // The partial aggregation's output is determined via AbstractPhysicalOperationProviders.intermediateAttributes() + assertThat(Expressions.names(aggPartial.output()), contains("count", "seen")); limit = as(aggPartial.child(), LimitExec.class); var exchange = as(limit.child(), ExchangeExec.class); var project = as(exchange.child(), ProjectExec.class); @@ -2375,7 +2571,7 @@ public void testSpatialTypesAndStatsUseDocValuesNestedLiteral() { assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "centroid", 
SpatialCentroid.class, GEO_POINT, false); agg = as(agg.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); var eval = as(agg.child(), EvalExec.class); @@ -2389,7 +2585,7 @@ public void testSpatialTypesAndStatsUseDocValuesNestedLiteral() { assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); agg = as(agg.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); eval = as(agg.child(), EvalExec.class); @@ -2444,7 +2640,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregations() { assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, true); @@ -2512,7 +2708,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiSpatialAggregations() { assertAggregation(agg, "cities", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), 
equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "airports", SpatialCentroid.class, GEO_POINT, true); @@ -2578,7 +2774,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsFiltered() { assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, true); @@ -2647,7 +2843,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGrouped() { assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); att = as(agg.groupings().get(0), Attribute.class); assertThat(att.name(), equalTo("scalerank")); // below the exchange (in data node) the aggregation is using doc-values @@ -2700,7 +2896,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGroupedAggregat assertAggregation(agg, "count", Sum.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); agg = as(agg.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); 
assertAggregation(agg, "count", Sum.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); @@ -2726,7 +2922,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGroupedAggregat assertAggregation(agg, "count", Sum.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); agg = as(agg.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "count", Sum.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); @@ -2743,7 +2939,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGroupedAggregat att = as(agg.groupings().get(0), Attribute.class); assertThat(att.name(), equalTo("scalerank")); // below the exchange (in data node) the aggregation is using doc-values - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); assertAggregation(agg, "count", Count.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, true); var extract = as(agg.child(), FieldExtractExec.class); @@ -3037,7 +3233,7 @@ public void testPushDownSpatialRelatesStringToSourceAndUseDocValuesForCentroid() assertAggregation(agg, "centroid", SpatialCentroid.class, test.locationType(), false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "centroid", SpatialCentroid.class, test.locationType(), true); @@ -3144,7 +3340,7 @@ public 
void testPushSpatialIntersectsStringToSourceAndUseDocValuesForCentroid() assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, true); @@ -3249,7 +3445,7 @@ AND ST_INTERSECTS(TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))"), l assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, true); @@ -3330,7 +3526,7 @@ public void testIntersectsOnTwoPointFieldAndBothCentroidUsesDocValues() { assertAggregation(agg, "city_location", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "location", SpatialCentroid.class, GEO_POINT, true); @@ -3376,7 +3572,7 @@ public void testIntersectsOnTwoPointFieldAndOneCentroidUsesDocValues() { assertAggregation(agg, aggFieldName, SpatialCentroid.class, GEO_POINT, false); 
exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, aggFieldName, SpatialCentroid.class, GEO_POINT, true); @@ -3422,7 +3618,7 @@ AND ST_INTERSECTS(city_location, TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 1 assertAggregation(agg, "city_location", SpatialCentroid.class, GEO_POINT, false); exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(PARTIAL)); + assertThat("Aggregation is PARTIAL", agg.getMode(), equalTo(INITIAL)); // below the exchange (in data node) the aggregation is using doc-values assertAggregation(agg, "count", Count.class); assertAggregation(agg, "location", SpatialCentroid.class, GEO_POINT, true); @@ -3825,7 +4021,7 @@ public void testEnrichBeforeAggregation() { var finalAggs = as(limit.child(), AggregateExec.class); assertThat(finalAggs.getMode(), equalTo(FINAL)); var partialAggs = as(finalAggs.child(), AggregateExec.class); - assertThat(partialAggs.getMode(), equalTo(PARTIAL)); + assertThat(partialAggs.getMode(), equalTo(INITIAL)); var enrich = as(partialAggs.child(), EnrichExec.class); assertThat(enrich.mode(), equalTo(Enrich.Mode.COORDINATOR)); assertThat(enrich.concreteIndices(), equalTo(Map.of("", ".enrich-departments-3"))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java index edf75170adc63..a4aef74d0e10a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -149,6 +150,11 @@ public List output() { return child().output(); } + @Override + protected AttributeSet computeReferences() { + return AttributeSet.EMPTY; + } + @Override public LogicalPlan firstPhase() { return child(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExecSerializationTests.java index ca9dd2045004e..fee1dc6894cc7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExecSerializationTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.plan.physical; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -23,9 +25,10 @@ public static AggregateExec randomAggregateExec(int depth) { PhysicalPlan child = randomChild(depth); List groupings = randomFieldAttributes(0, 5, false).stream().map(a -> (Expression) a).toList(); List aggregates = AggregateSerializationTests.randomAggregates(); - AggregateExec.Mode mode = 
randomFrom(AggregateExec.Mode.values()); + AggregatorMode mode = randomFrom(AggregatorMode.values()); + List intermediateAttributes = randomFieldAttributes(0, 5, false); Integer estimatedRowSize = randomEstimatedRowSize(); - return new AggregateExec(source, child, groupings, aggregates, mode, estimatedRowSize); + return new AggregateExec(source, child, groupings, aggregates, mode, intermediateAttributes, estimatedRowSize); } @Override @@ -38,20 +41,22 @@ protected AggregateExec mutateInstance(AggregateExec instance) throws IOExceptio PhysicalPlan child = instance.child(); List groupings = instance.groupings(); List aggregates = instance.aggregates(); - AggregateExec.Mode mode = instance.getMode(); + List intermediateAttributes = instance.intermediateAttributes(); + AggregatorMode mode = instance.getMode(); Integer estimatedRowSize = instance.estimatedRowSize(); - switch (between(0, 4)) { + switch (between(0, 5)) { case 0 -> child = randomValueOtherThan(child, () -> randomChild(0)); case 1 -> groupings = randomValueOtherThan(groupings, () -> randomFieldAttributes(0, 5, false)); case 2 -> aggregates = randomValueOtherThan(aggregates, AggregateSerializationTests::randomAggregates); - case 3 -> mode = randomValueOtherThan(mode, () -> randomFrom(AggregateExec.Mode.values())); - case 4 -> estimatedRowSize = randomValueOtherThan( + case 3 -> mode = randomValueOtherThan(mode, () -> randomFrom(AggregatorMode.values())); + case 4 -> intermediateAttributes = randomValueOtherThan(intermediateAttributes, () -> randomFieldAttributes(0, 5, false)); + case 5 -> estimatedRowSize = randomValueOtherThan( estimatedRowSize, AbstractPhysicalPlanSerializationTests::randomEstimatedRowSize ); default -> throw new IllegalStateException(); } - return new AggregateExec(instance.source(), child, groupings, aggregates, mode, estimatedRowSize); + return new AggregateExec(instance.source(), child, groupings, aggregates, mode, intermediateAttributes, estimatedRowSize); } @Override diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index 925601bded425..d0d800431360f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -1227,6 +1227,26 @@ public void testEnrichOnDefaultField() { assertThat(fieldNames, equalTo(ALL_FIELDS)); } + public void testMetrics() { + Set fieldNames = EsqlSession.fieldNames(parser.createStatement(""" + METRICS k8s bytes=sum(rate(network.total_bytes_in)), sum(rate(network.total_cost)) BY cluster"""), Set.of()); + assertThat( + fieldNames, + equalTo( + Set.of( + "@timestamp", + "@timestamp.*", + "network.total_bytes_in", + "network.total_bytes_in.*", + "network.total_cost", + "network.total_cost.*", + "cluster", + "cluster.*" + ) + ) + ); + } + private void assertFieldNames(String query, Set expected) { Set fieldNames = EsqlSession.fieldNames(parser.createStatement(query), Collections.emptySet()); assertThat(fieldNames, equalTo(expected)); From b25962232d4dd6a0aa66cd67091a5e69e79bcd91 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 4 Sep 2024 09:32:17 +0100 Subject: [PATCH 029/115] Create IndexVersionAllocationDecider (#102708) Create IndexVersionAllocationDecider as a counterpart to NodeVersionAllocationDecider, that checks the max index version rather than node version --- .../elasticsearch/cluster/ClusterModule.java | 2 + .../IndexVersionAllocationDecider.java | 142 ++++ .../decider/NodeVersionAllocationDecider.java | 2 +- .../cluster/ClusterModuleTests.java | 18 +- .../IndexVersionAllocationDeciderTests.java | 699 ++++++++++++++++++ .../NodeVersionAllocationDeciderTests.java | 22 +- .../core/ilm/SetSingleNodeAllocateStep.java | 2 + 7 files changed, 865 insertions(+), 22 
deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/IndexVersionAllocationDecider.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexVersionAllocationDeciderTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 3fba3a7bdbe13..e399e739da047 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.IndexVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeReplacementAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeShutdownAllocationDecider; @@ -364,6 +365,7 @@ public static Collection createAllocationDeciders( addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(clusterSettings)); addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(clusterSettings)); addAllocationDecider(deciders, new EnableAllocationDecider(clusterSettings)); + addAllocationDecider(deciders, new IndexVersionAllocationDecider()); addAllocationDecider(deciders, new NodeVersionAllocationDecider()); addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider()); addAllocationDecider(deciders, new RestoreInProgressAllocationDecider()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/IndexVersionAllocationDecider.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/IndexVersionAllocationDecider.java new file mode 100644 index 0000000000000..05f79e967ff46 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/IndexVersionAllocationDecider.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; + +/** + * An allocation decider that prevents relocation or allocation from nodes + * that might not be index compatible. If we relocate from a node that uses + * a newer index version than the node we relocate to this might cause {@link org.apache.lucene.index.IndexFormatTooNewException} + * on the lowest level since it might have already written segments that use a new postings format or codec that is not + * available on the target node. 
+ */ +public class IndexVersionAllocationDecider extends AllocationDecider { + + public static final String NAME = "index_version"; + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (shardRouting.primary()) { + if (shardRouting.currentNodeId() == null) { + if (shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { + // restoring from a snapshot - check that the node can handle the version + return isVersionCompatible((SnapshotRecoverySource) shardRouting.recoverySource(), node, allocation); + } else { + // existing or fresh primary on the node + return allocation.decision(Decision.YES, NAME, "no existing allocation, assuming compatible"); + } + } else { + // relocating primary, only migrate to newer host + return isIndexVersionCompatibleRelocatePrimary(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation); + } + } else { + final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId()); + // check that active primary has a newer version so that peer recovery works + if (primary != null) { + return isIndexVersionCompatibleAllocatingReplica(allocation.routingNodes(), primary.currentNodeId(), node, allocation); + } else { + // ReplicaAfterPrimaryActiveAllocationDecider should prevent this case from occurring + return allocation.decision(Decision.YES, NAME, "no active primary shard yet"); + } + } + } + + @Override + public Decision canForceAllocateDuringReplace(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return canAllocate(shardRouting, node, allocation); + } + + private static Decision isIndexVersionCompatibleRelocatePrimary( + final RoutingNodes routingNodes, + final String sourceNodeId, + final RoutingNode target, + final RoutingAllocation allocation + ) { + final RoutingNode source = routingNodes.node(sourceNodeId); + if 
(target.node().getMaxIndexVersion().onOrAfter(source.node().getMaxIndexVersion())) { + return allocation.decision( + Decision.YES, + NAME, + "can relocate primary shard from a node with index version [%s] to a node with equal-or-newer index version [%s]", + source.node().getMaxIndexVersion().toReleaseVersion(), + target.node().getMaxIndexVersion().toReleaseVersion() + ); + } else { + return allocation.decision( + Decision.NO, + NAME, + "cannot relocate primary shard from a node with index version [%s] to a node with older index version [%s]", + source.node().getMaxIndexVersion().toReleaseVersion(), + target.node().getMaxIndexVersion().toReleaseVersion() + ); + } + } + + private static Decision isIndexVersionCompatibleAllocatingReplica( + final RoutingNodes routingNodes, + final String sourceNodeId, + final RoutingNode target, + final RoutingAllocation allocation + ) { + final RoutingNode source = routingNodes.node(sourceNodeId); + if (target.node().getMaxIndexVersion().onOrAfter(source.node().getMaxIndexVersion())) { + /* we can allocate if we can recover from a node that is younger or on the same version + * if the primary is already running on a newer version that won't work due to possible + * differences in the lucene index format etc.*/ + return allocation.decision( + Decision.YES, + NAME, + "can allocate replica shard to a node with index version [%s]" + + " since this is equal-or-newer than the primary index version [%s]", + target.node().getMaxIndexVersion().toReleaseVersion(), + source.node().getMaxIndexVersion().toReleaseVersion() + ); + } else { + return allocation.decision( + Decision.NO, + NAME, + "cannot allocate replica shard to a node with index version [%s]" + + " since this is older than the primary index version [%s]", + target.node().getMaxIndexVersion().toReleaseVersion(), + source.node().getMaxIndexVersion().toReleaseVersion() + ); + } + } + + private static Decision isVersionCompatible( + SnapshotRecoverySource recoverySource, + final 
RoutingNode target, + final RoutingAllocation allocation + ) { + if (target.node().getMaxIndexVersion().onOrAfter(recoverySource.version())) { + /* we can allocate if we can restore from a snapshot that is older or on the same version */ + return allocation.decision( + Decision.YES, + NAME, + "max supported index version [%s] is the same or newer than snapshot version [%s]", + target.node().getMaxIndexVersion().toReleaseVersion(), + recoverySource.version().toReleaseVersion() + ); + } else { + return allocation.decision( + Decision.NO, + NAME, + "max supported index version [%s] is older than the snapshot version [%s]", + target.node().getMaxIndexVersion().toReleaseVersion(), + recoverySource.version().toReleaseVersion() + ); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 0c25d30593abd..033fbe38154ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -30,7 +30,7 @@ public class NodeVersionAllocationDecider extends AllocationDecider { public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { if (shardRouting.primary()) { if (shardRouting.currentNodeId() == null) { - if (shardRouting.recoverySource() != null && shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { + if (shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { // restoring from a snapshot - check that the node can handle the version return isVersionCompatible((SnapshotRecoverySource) shardRouting.recoverySource(), node, allocation); } else { diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java 
b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 544151d2adcd4..cac6fd5c28f85 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.IndexVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeReplacementAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeShutdownAllocationDecider; @@ -49,17 +50,20 @@ import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.BeforeClass; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.contains; public class ClusterModuleTests extends ModuleTestCase { private ClusterInfoService clusterInfoService = EmptyClusterInfoService.INSTANCE; @@ -232,7 +236,7 @@ public void testShardsAllocatorFactoryNull() { // running them. If the order of the deciders is changed for a valid reason, the order should be // changed in the test too. 
public void testAllocationDeciderOrder() { - List> expectedDeciders = Arrays.asList( + Stream> expectedDeciders = Stream.of( MaxRetryAllocationDecider.class, ResizeAllocationDecider.class, ReplicaAfterPrimaryActiveAllocationDecider.class, @@ -240,6 +244,7 @@ public void testAllocationDeciderOrder() { ClusterRebalanceAllocationDecider.class, ConcurrentRebalanceAllocationDecider.class, EnableAllocationDecider.class, + IndexVersionAllocationDecider.class, NodeVersionAllocationDecider.class, SnapshotInProgressAllocationDecider.class, RestoreInProgressAllocationDecider.class, @@ -257,12 +262,7 @@ public void testAllocationDeciderOrder() { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), Collections.emptyList() ); - Iterator iter = deciders.iterator(); - int idx = 0; - while (iter.hasNext()) { - AllocationDecider decider = iter.next(); - assertSame(decider.getClass(), expectedDeciders.get(idx++)); - } + assertThat(deciders, contains(expectedDeciders.>map(Matchers::instanceOf).toList())); } public void testRejectsReservedExistingShardsAllocatorName() { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexVersionAllocationDeciderTests.java new file mode 100644 index 0000000000000..ee46e8da5755d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexVersionAllocationDeciderTests.java @@ -0,0 +1,699 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; +import org.elasticsearch.cluster.routing.RoutingChangesObserver; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingNodesHelper; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.IndexVersionAllocationDecider; +import 
org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.EmptySnapshotsInfoService; +import org.elasticsearch.snapshots.InternalSnapshotsInfoService; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.test.gateway.TestGatewayAllocator; +import org.elasticsearch.test.index.IndexVersionUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.shuffle; +import static org.elasticsearch.cluster.routing.RoutingNodesHelper.shardsWithState; +import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class IndexVersionAllocationDeciderTests extends ESAllocationTestCase { + + public void testDoNotAllocateFromPrimary() { + AllocationService strategy = createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) + .build() + ); + + logger.info("Building initial routing table"); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(5).numberOfReplicas(2)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) + .addAsNew(metadata.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build(); + + assertThat(clusterState.routingTable().index("test").size(), equalTo(5)); + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(0).state(), equalTo(UNASSIGNED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(1).state(), equalTo(UNASSIGNED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(2).state(), equalTo(UNASSIGNED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(0).currentNodeId(), nullValue()); + assertThat(clusterState.routingTable().index("test").shard(i).shard(1).currentNodeId(), nullValue()); + assertThat(clusterState.routingTable().index("test").shard(i).shard(2).currentNodeId(), nullValue()); + } + + logger.info("start two nodes and fully start the shards"); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop()); + + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + 
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2)); + + } + + logger.info("start all the primary shards, replicas will start initializing"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1)); + } + + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1)); + } + + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop()); + + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + 
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1)); + } + + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build(); + clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop()); + + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1)); + } + + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2)); + } + } + + public void testRandom() { + AllocationService service = createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) 
+ .build() + ); + + logger.info("Building initial routing table"); + Metadata.Builder builder = Metadata.builder(); + RoutingTable.Builder rtBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); + int numIndices = between(1, 20); + for (int i = 0; i < numIndices; i++) { + builder.put( + IndexMetadata.builder("test_" + i) + .settings(settings(IndexVersion.current())) + .numberOfShards(between(1, 5)) + .numberOfReplicas(between(0, 2)) + ); + } + Metadata metadata = builder.build(); + + for (int i = 0; i < numIndices; i++) { + rtBuilder.addAsNew(metadata.index("test_" + i)); + } + RoutingTable routingTable = rtBuilder.build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build(); + assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo((int) routingTable.allShards().count())); + List nodes = new ArrayList<>(); + int nodeIdx = 0; + int iters = scaledRandomIntBetween(10, 100); + for (int i = 0; i < iters; i++) { + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + int numNodes = between(1, 20); + if (nodes.size() > numNodes) { + shuffle(nodes, random()); + nodes = nodes.subList(0, numNodes); + } else { + for (int j = nodes.size(); j < numNodes; j++) { + if (frequently()) { + if (randomBoolean()) { + nodes.add(newNode("node" + (nodeIdx++), Version.CURRENT, IndexVersionUtils.getPreviousVersion())); + } else { + nodes.add(newNode("node" + (nodeIdx++), Version.CURRENT, IndexVersion.current())); + } + } else { + nodes.add(newNode("node" + (nodeIdx++), Version.CURRENT, IndexVersionUtils.randomVersion())); + } + } + } + for (DiscoveryNode node : nodes) { + nodesBuilder.add(node); + } + clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); + clusterState = stabilize(clusterState, service); + } + } + + public void testRollingRestart() { + AllocationService service = createAllocationService( + 
Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) + .build() + ); + + logger.info("Building initial routing table"); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(5).numberOfReplicas(2)) + .build(); + + RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) + .addAsNew(metadata.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build(); + + assertThat(clusterState.routingTable().index("test").size(), equalTo(5)); + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(0).state(), equalTo(UNASSIGNED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(1).state(), equalTo(UNASSIGNED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(2).state(), equalTo(UNASSIGNED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(0).currentNodeId(), nullValue()); + assertThat(clusterState.routingTable().index("test").shard(i).shard(1).currentNodeId(), nullValue()); + assertThat(clusterState.routingTable().index("test").shard(i).shard(2).currentNodeId(), nullValue()); + } + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("old0", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + .add(newNode("old1", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + .add(newNode("old2", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + ) + 
.build(); + clusterState = stabilize(clusterState, service); + + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("old0", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + .add(newNode("old1", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + .add(newNode("new0")) + ) + .build(); + + clusterState = stabilize(clusterState, service); + + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("node0", Version.CURRENT, IndexVersionUtils.getPreviousVersion())) + .add(newNode("new1")) + .add(newNode("new0")) + ) + .build(); + + clusterState = stabilize(clusterState, service); + + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("new2")).add(newNode("new1")).add(newNode("new0"))) + .build(); + + clusterState = stabilize(clusterState, service); + for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) { + assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(0).state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(1).state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(2).state(), equalTo(STARTED)); + assertThat(clusterState.routingTable().index("test").shard(i).shard(0).currentNodeId(), notNullValue()); + assertThat(clusterState.routingTable().index("test").shard(i).shard(1).currentNodeId(), notNullValue()); + assertThat(clusterState.routingTable().index("test").shard(i).shard(2).currentNodeId(), notNullValue()); + } + } + + public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNodes() { + ShardId shard1 = new ShardId("test1", "_na_", 0); + ShardId shard2 = new ShardId("test2", "_na_", 0); + final DiscoveryNode newNode = 
DiscoveryNodeUtils.builder("newNode").roles(MASTER_DATA_ROLES).build(); + final DiscoveryNode oldNode1 = DiscoveryNodeUtils.builder("oldNode1") + .roles(MASTER_DATA_ROLES) + .version(Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion()) + .build(); + final DiscoveryNode oldNode2 = DiscoveryNodeUtils.builder("oldNode2") + .roles(MASTER_DATA_ROLES) + .version(Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion()) + .build(); + AllocationId allocationId1P = AllocationId.newInitializing(); + AllocationId allocationId1R = AllocationId.newInitializing(); + AllocationId allocationId2P = AllocationId.newInitializing(); + AllocationId allocationId2R = AllocationId.newInitializing(); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(shard1.getIndexName()) + .settings(settings(IndexVersion.current()).put(Settings.EMPTY)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(allocationId1P.getId(), allocationId1R.getId())) + ) + .put( + IndexMetadata.builder(shard2.getIndexName()) + .settings(settings(IndexVersion.current()).put(Settings.EMPTY)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(allocationId2P.getId(), allocationId2R.getId())) + ) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shard1.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shard1).addShard( + shardRoutingBuilder(shard1.getIndexName(), shard1.getId(), newNode.getId(), true, ShardRoutingState.STARTED) + .withAllocationId(allocationId1P) + .build() + ) + .addShard( + shardRoutingBuilder( + shard1.getIndexName(), + shard1.getId(), + oldNode1.getId(), + false, + ShardRoutingState.STARTED + ).withAllocationId(allocationId1R).build() + ) + ) + ) + .add( + IndexRoutingTable.builder(shard2.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shard2).addShard( 
+ shardRoutingBuilder(shard2.getIndexName(), shard2.getId(), newNode.getId(), true, ShardRoutingState.STARTED) + .withAllocationId(allocationId2P) + .build() + ) + .addShard( + shardRoutingBuilder( + shard2.getIndexName(), + shard2.getId(), + oldNode1.getId(), + false, + ShardRoutingState.STARTED + ).withAllocationId(allocationId2R).build() + ) + ) + ) + .build(); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)) + .build(); + AllocationDeciders allocationDeciders = new AllocationDeciders(Collections.singleton(new IndexVersionAllocationDecider())); + AllocationService strategy = new MockAllocationService( + allocationDeciders, + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + state = strategy.reroute(state, new AllocationCommands(), true, false, false, ActionListener.noop()).clusterState(); + // the two indices must stay as is, the replicas cannot move to oldNode2 because versions don't match + assertThat(state.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0)); + assertThat(state.routingTable().index(shard1.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0)); + } + + public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { + final DiscoveryNode newNode = DiscoveryNodeUtils.builder("newNode").roles(MASTER_DATA_ROLES).build(); + final DiscoveryNode oldNode1 = DiscoveryNodeUtils.builder("oldNode1") + .roles(MASTER_DATA_ROLES) + .version(Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion()) + .build(); + final DiscoveryNode oldNode2 = DiscoveryNodeUtils.builder("oldNode2") + .roles(MASTER_DATA_ROLES) + .version(Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion()) 
+ .build(); + + final Snapshot snapshot = new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())); + final IndexId indexId = new IndexId("test", UUIDs.randomBase64UUID(random())); + + final int numberOfShards = randomIntBetween(1, 3); + final IndexMetadata.Builder indexMetadata = IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(numberOfShards) + .numberOfReplicas(randomIntBetween(0, 3)); + for (int i = 0; i < numberOfShards; i++) { + indexMetadata.putInSyncAllocationIds(i, Collections.singleton("_test_")); + } + Metadata metadata = Metadata.builder().put(indexMetadata).build(); + + final Map snapshotShardSizes = new HashMap<>(numberOfShards); + final Index index = metadata.index("test").getIndex(); + for (int i = 0; i < numberOfShards; i++) { + final ShardId shardId = new ShardId(index, i); + snapshotShardSizes.put(new InternalSnapshotsInfoService.SnapshotShard(snapshot, indexId, shardId), randomNonNegativeLong()); + } + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(metadata) + .routingTable( + RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) + .addAsRestore( + metadata.index("test"), + new SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, IndexVersion.current(), indexId) + ) + .build() + ) + .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)) + .build(); + AllocationDeciders allocationDeciders = new AllocationDeciders( + Arrays.asList(new ReplicaAfterPrimaryActiveAllocationDecider(), new IndexVersionAllocationDecider()) + ); + AllocationService strategy = new MockAllocationService( + allocationDeciders, + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + () -> new SnapshotShardSizeInfo(snapshotShardSizes) + ); + state = strategy.reroute(state, new AllocationCommands(), true, false, false, ActionListener.noop()).clusterState(); + + // Make sure that primary 
shards are only allocated on the new node + for (int i = 0; i < numberOfShards; i++) { + assertEquals("newNode", state.routingTable().index("test").shard(i).primaryShard().currentNodeId()); + } + } + + private ClusterState stabilize(ClusterState clusterState, AllocationService service) { + logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); + + clusterState = service.disassociateDeadNodes(clusterState, true, "reroute"); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); + assertRecoveryIndexVersions(routingNodes); + + logger.info("complete rebalancing"); + boolean changed; + do { + logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); + ClusterState newState = startInitializingShardsAndReroute(service, clusterState); + changed = newState.equals(clusterState) == false; + clusterState = newState; + routingNodes = clusterState.getRoutingNodes(); + assertRecoveryIndexVersions(routingNodes); + } while (changed); + return clusterState; + } + + private void assertRecoveryIndexVersions(RoutingNodes routingNodes) { + logger.trace("RoutingNodes: {}", routingNodes); + + List mutableShardRoutings = shardsWithState(routingNodes, ShardRoutingState.RELOCATING); + for (ShardRouting r : mutableShardRoutings) { + if (r.primary()) { + String toId = r.relocatingNodeId(); + String fromId = r.currentNodeId(); + assertThat(fromId, notNullValue()); + assertThat(toId, notNullValue()); + logger.trace( + "From: {} with IndexVersion: {} to: {} with IndexVersion: {}", + fromId, + routingNodes.node(fromId).node().getMaxIndexVersion(), + toId, + routingNodes.node(toId).node().getMaxIndexVersion() + ); + assertTrue( + routingNodes.node(toId).node().getMaxIndexVersion().onOrAfter(routingNodes.node(fromId).node().getMaxIndexVersion()) + ); + } else { + ShardRouting primary = routingNodes.activePrimary(r.shardId()); + assertThat(primary, notNullValue()); + String fromId = primary.currentNodeId(); + String toId = r.relocatingNodeId(); + logger.trace( + "From: {} 
with IndexVersion: {} to: {} with IndexVersion: {}", + fromId, + routingNodes.node(fromId).node().getMaxIndexVersion(), + toId, + routingNodes.node(toId).node().getMaxIndexVersion() + ); + assertTrue( + routingNodes.node(toId).node().getMaxIndexVersion().onOrAfter(routingNodes.node(fromId).node().getMaxIndexVersion()) + ); + } + } + + mutableShardRoutings = shardsWithState(routingNodes, ShardRoutingState.INITIALIZING); + for (ShardRouting r : mutableShardRoutings) { + if (r.primary() == false) { + ShardRouting primary = routingNodes.activePrimary(r.shardId()); + assertThat(primary, notNullValue()); + String fromId = primary.currentNodeId(); + String toId = r.currentNodeId(); + logger.trace( + "From: {} with IndexVersion: {} to: {} with IndexVersion: {}", + fromId, + routingNodes.node(fromId).node().getMaxIndexVersion(), + toId, + routingNodes.node(toId).node().getMaxIndexVersion() + ); + assertTrue( + routingNodes.node(toId).node().getMaxIndexVersion().onOrAfter(routingNodes.node(fromId).node().getMaxIndexVersion()) + ); + } + } + } + + public void testMessages() { + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) + .addAsNew(metadata.index("test")) + .build(); + + RoutingNode newNode = RoutingNodesHelper.routingNode("newNode", newNode("newNode", Version.CURRENT, IndexVersion.current())); + RoutingNode oldNode = RoutingNodesHelper.routingNode( + "oldNode", + newNode("oldNode", Version.CURRENT, IndexVersionUtils.getPreviousVersion()) + ); + + final ClusterName clusterName = ClusterName.DEFAULT; + ClusterState clusterState = ClusterState.builder(clusterName) + .metadata(metadata) + .routingTable(initialRoutingTable) + .nodes(DiscoveryNodes.builder().add(newNode.node()).add(oldNode.node())) + .build(); + + final ShardId shardId = 
clusterState.routingTable().index("test").shard(0).shardId(); + final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); + final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().get(0); + + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); + routingAllocation.debugDecision(true); + + final IndexVersionAllocationDecider allocationDecider = new IndexVersionAllocationDecider(); + Decision decision = allocationDecider.canAllocate(primaryShard, newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat(decision.getExplanation(), is("no existing allocation, assuming compatible")); + + decision = allocationDecider.canAllocate(ShardRoutingHelper.initialize(primaryShard, "oldNode"), newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat( + decision.getExplanation(), + is( + "can relocate primary shard from a node with index version [" + + oldNode.node().getMaxIndexVersion().toReleaseVersion() + + "] to a node with equal-or-newer index version [" + + newNode.node().getMaxIndexVersion().toReleaseVersion() + + "]" + ) + ); + + decision = allocationDecider.canAllocate(ShardRoutingHelper.initialize(primaryShard, "newNode"), oldNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat( + decision.getExplanation(), + is( + "cannot relocate primary shard from a node with index version [" + + newNode.node().getMaxIndexVersion().toReleaseVersion() + + "] to a node with older index version [" + + oldNode.node().getMaxIndexVersion().toReleaseVersion() + + "]" + ) + ); + + final IndexId indexId = new IndexId("test", UUIDs.randomBase64UUID(random())); + final SnapshotRecoverySource newVersionSnapshot = new SnapshotRecoverySource( + UUIDs.randomBase64UUID(), + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), + 
newNode.node().getMaxIndexVersion(), + indexId + ); + final SnapshotRecoverySource oldVersionSnapshot = new SnapshotRecoverySource( + UUIDs.randomBase64UUID(), + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), + oldNode.node().getMaxIndexVersion(), + indexId + ); + + decision = allocationDecider.canAllocate( + ShardRoutingHelper.newWithRestoreSource(primaryShard, newVersionSnapshot), + oldNode, + routingAllocation + ); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat( + decision.getExplanation(), + is( + "max supported index version [" + + oldNode.node().getMaxIndexVersion().toReleaseVersion() + + "] is older than the snapshot version [" + + newNode.node().getMaxIndexVersion().toReleaseVersion() + + "]" + ) + ); + + decision = allocationDecider.canAllocate( + ShardRoutingHelper.newWithRestoreSource(primaryShard, oldVersionSnapshot), + newNode, + routingAllocation + ); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat( + decision.getExplanation(), + is( + "max supported index version [" + + newNode.node().getMaxIndexVersion().toReleaseVersion() + + "] is the same or newer than snapshot version [" + + oldNode.node().getMaxIndexVersion().toReleaseVersion() + + "]" + ) + ); + + final RoutingChangesObserver routingChangesObserver = new RoutingChangesObserver() { + }; + final RoutingNodes routingNodes = clusterState.mutableRoutingNodes(); + final ShardRouting startedPrimary = routingNodes.startShard( + routingNodes.initializeShard(primaryShard, "newNode", null, 0, routingChangesObserver), + routingChangesObserver, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE + ); + routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, null, 0); + routingAllocation.debugDecision(true); + + decision = allocationDecider.canAllocate(replicaShard, oldNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat( + decision.getExplanation(), + is( + "cannot allocate replica 
shard to a node with index version [" + + oldNode.node().getMaxIndexVersion().toReleaseVersion() + + "] since this is older than the primary index version [" + + newNode.node().getMaxIndexVersion().toReleaseVersion() + + "]" + ) + ); + + routingNodes.startShard( + routingNodes.relocateShard(startedPrimary, "oldNode", 0, "test", routingChangesObserver).v2(), + routingChangesObserver, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE + ); + routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, null, 0); + routingAllocation.debugDecision(true); + + decision = allocationDecider.canAllocate(replicaShard, newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat( + decision.getExplanation(), + is( + "can allocate replica shard to a node with index version [" + + newNode.node().getMaxIndexVersion().toReleaseVersion() + + "] since this is equal-or-newer than the primary index version [" + + oldNode.node().getMaxIndexVersion().toReleaseVersion() + + "]" + ) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index a9a02cedf2766..cf710be75b536 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -217,14 +217,12 @@ public void testRandom() { for (int j = nodes.size(); j < numNodes; j++) { if (frequently()) { if (randomBoolean()) { - nodes.add( - newNode("node" + (nodeIdx++), VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion()) - ); + nodes.add(newNode("node" + (nodeIdx++), VersionUtils.getPreviousVersion(), IndexVersion.current())); } else { nodes.add(newNode("node" + (nodeIdx++), Version.CURRENT, IndexVersion.current())); } } else { - 
nodes.add(newNode("node" + (nodeIdx++), VersionUtils.randomVersion(random()), IndexVersionUtils.randomVersion())); + nodes.add(newNode("node" + (nodeIdx++), VersionUtils.randomVersion(random()), IndexVersion.current())); } } } @@ -270,9 +268,9 @@ public void testRollingRestart() { clusterState = ClusterState.builder(clusterState) .nodes( DiscoveryNodes.builder() - .add(newNode("old0", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion())) - .add(newNode("old1", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion())) - .add(newNode("old2", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion())) + .add(newNode("old0", VersionUtils.getPreviousVersion(), IndexVersion.current())) + .add(newNode("old1", VersionUtils.getPreviousVersion(), IndexVersion.current())) + .add(newNode("old2", VersionUtils.getPreviousVersion(), IndexVersion.current())) ) .build(); clusterState = stabilize(clusterState, service); @@ -280,8 +278,8 @@ public void testRollingRestart() { clusterState = ClusterState.builder(clusterState) .nodes( DiscoveryNodes.builder() - .add(newNode("old0", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion())) - .add(newNode("old1", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion())) + .add(newNode("old0", VersionUtils.getPreviousVersion(), IndexVersion.current())) + .add(newNode("old1", VersionUtils.getPreviousVersion(), IndexVersion.current())) .add(newNode("new0")) ) .build(); @@ -291,7 +289,7 @@ public void testRollingRestart() { clusterState = ClusterState.builder(clusterState) .nodes( DiscoveryNodes.builder() - .add(newNode("node0", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion())) + .add(newNode("node0", VersionUtils.getPreviousVersion(), IndexVersion.current())) .add(newNode("new1")) .add(newNode("new0")) ) @@ -321,11 +319,11 @@ public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNode final DiscoveryNode 
newNode = DiscoveryNodeUtils.builder("newNode").roles(MASTER_DATA_ROLES).build(); final DiscoveryNode oldNode1 = DiscoveryNodeUtils.builder("oldNode1") .roles(MASTER_DATA_ROLES) - .version(VersionUtils.getPreviousVersion(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion()) + .version(VersionUtils.getPreviousVersion(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) .build(); final DiscoveryNode oldNode2 = DiscoveryNodeUtils.builder("oldNode2") .roles(MASTER_DATA_ROLES) - .version(VersionUtils.getPreviousVersion(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion()) + .version(VersionUtils.getPreviousVersion(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) .build(); AllocationId allocationId1P = AllocationId.newInitializing(); AllocationId allocationId1R = AllocationId.newInitializing(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java index 1f636205bd867..1744d0e0384fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.IndexVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeReplacementAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeShutdownAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; @@ -74,6 +75,7 @@ public void performAction( new 
ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ), DataTierAllocationDecider.INSTANCE, + new IndexVersionAllocationDecider(), new NodeVersionAllocationDecider(), new NodeShutdownAllocationDecider(), new NodeReplacementAllocationDecider() From 6cfbeafe14cf74465aa75b666b3a34da11a50693 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 4 Sep 2024 10:50:18 +0200 Subject: [PATCH 030/115] Clarify comment in SearchExecutionContext#setLookupProviders (#112487) Remove a TODO around asserting that setLookupProviders is only called as part of the fetch phase. That is not the case, hence we are not going to address that, but it makes sense to replace the TODO with a comment that clarifies how the method may be used and called. --- .../org/elasticsearch/index/query/SearchExecutionContext.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index f25a0c73ac25d..106a5e811c48d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -511,7 +511,8 @@ public void setLookupProviders( SourceProvider sourceProvider, Function fieldLookupProvider ) { - // TODO can we assert that this is only called during FetchPhase? 
+ // This isn't called only during fetch phase: there's scenarios where fetch phase is executed as part of the query phase, + // as well as runtime fields loaded from _source that do need a source provider as part of executing the query this.lookup = new SearchLookup( this::getFieldType, (fieldType, searchLookup, fielddataOperation) -> indexFieldDataLookup.apply( From fd41724fa6b4f0977689ef552af30da82e859da6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 2024 10:52:51 +0200 Subject: [PATCH 031/115] Avoid building O(N) list in can_match phase (#112458) We can be a little faster and memory efficient here by not building a list of shard requests up-front and instead building them as we need them. --- .../action/search/CanMatchNodeRequest.java | 4 ---- .../java/org/elasticsearch/search/SearchService.java | 10 ++++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java index bc50a9f8f0c2c..30fa4caa79a15 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -229,10 +229,6 @@ public List getShardLevelRequests() { return shards; } - public List createShardSearchRequests() { - return shards.stream().map(this::createShardSearchRequest).toList(); - } - public ShardSearchRequest createShardSearchRequest(Shard r) { ShardSearchRequest shardSearchRequest = new ShardSearchRequest( new OriginalIndices(r.indices, indicesOptions), diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 67d5d6337d77c..9dc44d5f66948 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1664,13 +1664,11 @@ 
public AliasFilter buildAliasFilter(ClusterState state, String index, Set listener) { - final List shardSearchRequests = request.createShardSearchRequests(); - final List responses = new ArrayList<>(shardSearchRequests.size()); - for (ShardSearchRequest shardSearchRequest : shardSearchRequests) { - CanMatchShardResponse canMatchShardResponse; + var shardLevelRequests = request.getShardLevelRequests(); + final List responses = new ArrayList<>(shardLevelRequests.size()); + for (var shardLevelRequest : shardLevelRequests) { try { - canMatchShardResponse = canMatch(shardSearchRequest); - responses.add(new CanMatchNodeResponse.ResponseOrFailure(canMatchShardResponse)); + responses.add(new CanMatchNodeResponse.ResponseOrFailure(canMatch(request.createShardSearchRequest(shardLevelRequest)))); } catch (Exception e) { responses.add(new CanMatchNodeResponse.ResponseOrFailure(e)); } From 3a8edf51977ceed59ea2763dd8341df74685e0c8 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 2024 10:59:13 +0200 Subject: [PATCH 032/115] Fix O(N) list building in TransportSearchAction.asyncSearchExecutor (#112474) This at least avoids the O(N) list building which is needlessly heavy for large index counts. Not sure the logic makes perfect sense in all cases, but it should remain practically unchanged for now (except when there's more than 2 indices and they're all system ones). 
--- .../action/search/TransportSearchAction.java | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 32ee9c331295c..e29b07eeffe11 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1285,16 +1285,27 @@ private void executeSearch( } Executor asyncSearchExecutor(final String[] indices) { - final List executorsForIndices = Arrays.stream(indices).map(executorSelector::executorForSearch).toList(); - if (executorsForIndices.size() == 1) { // all indices have same executor - return threadPool.executor(executorsForIndices.get(0)); + boolean seenSystem = false; + boolean seenCritical = false; + for (String index : indices) { + final String executorName = executorSelector.executorForSearch(index); + switch (executorName) { + case SYSTEM_READ -> seenSystem = true; + case SYSTEM_CRITICAL_READ -> seenCritical = true; + default -> { + return threadPool.executor(executorName); + } + } } - if (executorsForIndices.size() == 2 - && executorsForIndices.contains(SYSTEM_READ) - && executorsForIndices.contains(SYSTEM_CRITICAL_READ)) { // mix of critical and non critical system indices - return threadPool.executor(SYSTEM_READ); + final String executor; + if (seenSystem == false && seenCritical) { + executor = SYSTEM_CRITICAL_READ; + } else if (seenSystem) { + executor = SYSTEM_READ; + } else { + executor = ThreadPool.Names.SEARCH; } - return threadPool.executor(ThreadPool.Names.SEARCH); + return threadPool.executor(executor); } static BiFunction buildConnectionLookup( From 3747765ab86085975e743da4425f852fdf993e04 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 4 Sep 2024 11:12:11 +0200 Subject: [PATCH 033/115] [DOC] geo_shape field type supports geo_hex 
aggregation (#112448) --- docs/reference/mapping/types/geo-shape.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 20f79df8950af..e50c7d73b1b76 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -18,9 +18,8 @@ Documents using this type can be used: ** a <> (for example, intersecting polygons). * to aggregate documents by geographic grids: ** either <> -** or <>. - -Grid aggregations over `geo_hex` grids are not supported for `geo_shape` fields. +** or <> +** or <> [[geo-shape-mapping-options]] [discrete] From 435df33bfaf4d62b3eddef26e2c0613a6ea165cc Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 4 Sep 2024 11:16:23 +0200 Subject: [PATCH 034/115] Fix tests in LegacyGeoShapeWithDocValuesQueryTests (#112467) Similar to what we did in #86118, it filters out points with longitude 180. --- muted-tests.yml | 6 ------ .../index/query/LegacyGeoShapeWithDocValuesQueryTests.java | 5 +++++ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 1a8b0e8526c89..7830bb09670a0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -170,9 +170,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112421 - class: org.elasticsearch.indices.mapping.UpdateMappingIntegrationIT issue: https://github.com/elastic/elasticsearch/issues/112423 -- class: org.elasticsearch.xpack.spatial.index.query.LegacyGeoShapeWithDocValuesQueryTests - method: testIndexPointsFromLine - issue: https://github.com/elastic/elasticsearch/issues/112438 - class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests method: "testAggregateIntermediate {TestCase= #2}" issue: https://github.com/elastic/elasticsearch/issues/112461 @@ -188,9 +185,6 @@ tests: - class: 
org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests method: testLoopOneAtATime issue: https://github.com/elastic/elasticsearch/issues/112471 -- class: org.elasticsearch.xpack.spatial.index.query.LegacyGeoShapeWithDocValuesQueryTests - method: testIndexPointsFromPolygon - issue: https://github.com/elastic/elasticsearch/issues/112464 # Examples: # diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java index ce40b19377bc6..053931a882e4c 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -220,4 +221,8 @@ public void testFieldAlias() throws IOException { assertHitCount(client().prepareSearch(defaultIndexName).setQuery(geoShapeQuery("alias", multiPoint)), 1L); } + + protected boolean ignoreLons(double[] lons) { + return Arrays.stream(lons).anyMatch(v -> v == 180); + } } From ec51cba1764facd3dd6b71703f5a59f804434d67 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 2024 12:27:13 +0200 Subject: [PATCH 035/115] Speedup reading ClusterFeatures from the wire (#112492) This shows up as rather heavy at times, we can speed it up by skipping collecting the temporary map and reading the lookup into an array instead of a list. 
--- .../java/org/elasticsearch/cluster/ClusterFeatures.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index 95cc53376af59..bab68303e8de5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -140,11 +140,10 @@ private static void writeCanonicalSets(StreamOutput out, Map out.writeMap(nodeFeatureSetIndexes, StreamOutput::writeVInt); } + @SuppressWarnings("unchecked") private static Map> readCanonicalSets(StreamInput in) throws IOException { - List> featureSets = in.readCollectionAsList(i -> i.readCollectionAsImmutableSet(StreamInput::readString)); - Map nodeIndexes = in.readMap(StreamInput::readVInt); - - return nodeIndexes.entrySet().stream().collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> featureSets.get(e.getValue()))); + Set[] featureSets = in.readArray(i -> i.readCollectionAsImmutableSet(StreamInput::readString), Set[]::new); + return in.readImmutableMap(streamInput -> featureSets[streamInput.readVInt()]); } public static ClusterFeatures readFrom(StreamInput in) throws IOException { From 3d8353861768cde38a3afbb734249951b34e58a3 Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Wed, 4 Sep 2024 11:29:38 +0100 Subject: [PATCH 036/115] Refactor health endpoint tests to use more readable style (#112433) This changes the mutate object method in 3 health endpoint related tests to use a lambda switch and local variable pattern that's more compact and understandable This is the promised follow up to #112024 --- .../health/ClusterHealthResponsesTests.java | 135 ++++------- .../health/ClusterIndexHealthTests.java | 220 ++++-------------- .../health/ClusterShardHealthTests.java | 137 +++-------- 3 files changed, 112 insertions(+), 380 deletions(-) diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index e18757e59c961..6d089f24f8b2c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -263,103 +263,46 @@ protected Predicate getRandomFieldsExcludeFilter() { @Override protected ClusterHealthResponse mutateInstance(ClusterHealthResponse instance) { - String mutate = randomFrom( - "clusterName", - "numberOfPendingTasks", - "numberOfInFlightFetch", - "delayedUnassignedShards", - "taskMaxWaitingTime", - "timedOut", - "clusterStateHealth" - ); - switch (mutate) { - case "clusterName": - return new ClusterHealthResponse( - instance.getClusterName() + randomAlphaOfLengthBetween(2, 5), - instance.getNumberOfPendingTasks(), - instance.getNumberOfInFlightFetch(), - instance.getDelayedUnassignedShards(), - instance.getTaskMaxWaitingTime(), - instance.isTimedOut(), - instance.getClusterStateHealth() - ); - case "numberOfPendingTasks": - return new ClusterHealthResponse( - instance.getClusterName(), - instance.getNumberOfPendingTasks() + between(1, 10), - instance.getNumberOfInFlightFetch(), - instance.getDelayedUnassignedShards(), - instance.getTaskMaxWaitingTime(), - instance.isTimedOut(), - instance.getClusterStateHealth() - ); - case "numberOfInFlightFetch": - return new ClusterHealthResponse( - instance.getClusterName(), - instance.getNumberOfPendingTasks(), - instance.getNumberOfInFlightFetch() + between(1, 10), - instance.getDelayedUnassignedShards(), - instance.getTaskMaxWaitingTime(), - instance.isTimedOut(), - instance.getClusterStateHealth() - ); - case "delayedUnassignedShards": - return new ClusterHealthResponse( - instance.getClusterName(), - instance.getNumberOfPendingTasks(), - 
instance.getNumberOfInFlightFetch(), - instance.getDelayedUnassignedShards() + between(1, 10), - instance.getTaskMaxWaitingTime(), - instance.isTimedOut(), - instance.getClusterStateHealth() - ); - case "taskMaxWaitingTime": + String clusterName = instance.getClusterName(); + int numberOfPendingTasks = instance.getNumberOfPendingTasks(); + int numberOfInFlightFetch = instance.getNumberOfInFlightFetch(); + int delayedUnassignedShards = instance.getDelayedUnassignedShards(); + TimeValue taskMaxWaitingTime = instance.getTaskMaxWaitingTime(); + boolean timedOut = instance.isTimedOut(); + ClusterStateHealth clusterStateHealth = instance.getClusterStateHealth(); - return new ClusterHealthResponse( - instance.getClusterName(), - instance.getNumberOfPendingTasks(), - instance.getNumberOfInFlightFetch(), - instance.getDelayedUnassignedShards(), - new TimeValue(instance.getTaskMaxWaitingTime().millis() + between(1, 10)), - instance.isTimedOut(), - instance.getClusterStateHealth() - ); - case "timedOut": - return new ClusterHealthResponse( - instance.getClusterName(), - instance.getNumberOfPendingTasks(), - instance.getNumberOfInFlightFetch(), - instance.getDelayedUnassignedShards(), - instance.getTaskMaxWaitingTime(), - instance.isTimedOut() ? 
false : true, - instance.getClusterStateHealth() - ); - case "clusterStateHealth": - ClusterStateHealth state = instance.getClusterStateHealth(); - ClusterStateHealth newState = new ClusterStateHealth( - state.getActivePrimaryShards() + between(1, 10), - state.getActiveShards(), - state.getRelocatingShards(), - state.getInitializingShards(), - state.getUnassignedShards(), - state.getUnassignedPrimaryShards(), - state.getNumberOfNodes(), - state.getNumberOfDataNodes(), - state.getActiveShardsPercent(), - state.getStatus(), - state.getIndices() - ); - return new ClusterHealthResponse( - instance.getClusterName(), - instance.getNumberOfPendingTasks(), - instance.getNumberOfInFlightFetch(), - instance.getDelayedUnassignedShards(), - instance.getTaskMaxWaitingTime(), - instance.isTimedOut(), - newState - ); - default: - throw new UnsupportedOperationException(); + switch (randomIntBetween(0, 6)) { + case 0 -> clusterName += randomAlphaOfLengthBetween(2, 5); + case 1 -> numberOfPendingTasks += between(1, 10); + case 2 -> numberOfInFlightFetch += between(1, 10); + case 3 -> delayedUnassignedShards += between(1, 10); + case 4 -> taskMaxWaitingTime = new TimeValue(instance.getTaskMaxWaitingTime().millis() + between(1, 10)); + case 5 -> timedOut = timedOut ? 
false : true; + case 6 -> clusterStateHealth = new ClusterStateHealth( + clusterStateHealth.getActivePrimaryShards() + between(1, 10), + clusterStateHealth.getActiveShards(), + clusterStateHealth.getRelocatingShards(), + clusterStateHealth.getInitializingShards(), + clusterStateHealth.getUnassignedShards(), + clusterStateHealth.getUnassignedPrimaryShards(), + clusterStateHealth.getNumberOfNodes(), + clusterStateHealth.getNumberOfDataNodes(), + clusterStateHealth.getActiveShardsPercent(), + clusterStateHealth.getStatus(), + clusterStateHealth.getIndices() + ); + default -> throw new UnsupportedOperationException(); } + + return new ClusterHealthResponse( + clusterName, + numberOfPendingTasks, + numberOfInFlightFetch, + delayedUnassignedShards, + taskMaxWaitingTime, + timedOut, + clusterStateHealth + ); } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java index 6ec9800839594..832bc227b177c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -141,187 +140,52 @@ protected Predicate getRandomFieldsExcludeFilter() { @Override protected ClusterIndexHealth mutateInstance(ClusterIndexHealth instance) { - String mutate = randomFrom( - "index", - "numberOfShards", - "numberOfReplicas", - "activeShards", - "relocatingShards", - "initializingShards", - "unassignedShards", - "unassignedPrimaryShards", - "activePrimaryShards", - "status", - "shards" - ); - switch (mutate) { - case "index": - return new ClusterIndexHealth( - instance.getIndex() + randomAlphaOfLengthBetween(2, 5), - instance.getNumberOfShards(), - 
instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "numberOfShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards() + between(1, 10), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "numberOfReplicas": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas() + between(1, 10), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "activeShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards() + between(1, 10), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "relocatingShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards() + between(1, 10), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - 
instance.getShards() - ); - case "initializingShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards() + between(1, 10), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "unassignedShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards() + between(1, 10), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "unassignedPrimaryShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards() + between(1, 10), - instance.getActivePrimaryShards(), - instance.getStatus(), - instance.getShards() - ); - case "activePrimaryShards": - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards() + between(1, 10), - instance.getStatus(), - instance.getShards() - ); - case "status": - ClusterHealthStatus status = randomFrom( - Arrays.stream(ClusterHealthStatus.values()).filter(value -> value.equals(instance.getStatus()) == false).toList() - ); - return new ClusterIndexHealth( - instance.getIndex(), - 
instance.getNumberOfShards(), - instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - status, - instance.getShards() - ); - case "shards": - Map map; + String index = instance.getIndex(); + int numberOfShards = instance.getNumberOfShards(); + int numberOfReplicas = instance.getNumberOfReplicas(); + int activeShards = instance.getActiveShards(); + int relocatingShards = instance.getRelocatingShards(); + int initializingShards = instance.getInitializingShards(); + int unassignedShards = instance.getUnassignedShards(); + int unassignedPrimaryShards = instance.getUnassignedPrimaryShards(); + int activePrimaryShards = instance.getActivePrimaryShards(); + ClusterHealthStatus status = instance.getStatus(); + Map shards = new HashMap<>(instance.getShards()); + + switch (randomIntBetween(0, 10)) { + case 0 -> index += randomAlphaOfLengthBetween(2, 5); + case 1 -> numberOfShards += between(1, 10); + case 2 -> numberOfReplicas += between(1, 10); + case 3 -> activeShards += between(1, 10); + case 4 -> relocatingShards += between(1, 10); + case 5 -> initializingShards += between(1, 10); + case 6 -> unassignedShards += between(1, 10); + case 7 -> unassignedPrimaryShards += between(1, 10); + case 8 -> activePrimaryShards += between(1, 10); + case 9 -> status = randomValueOtherThan(instance.getStatus(), () -> randomFrom(ClusterHealthStatus.values())); + case 10 -> { if (instance.getShards().isEmpty()) { - map = Collections.singletonMap(0, ClusterShardHealthTests.randomShardHealth(0)); + shards = Map.of(0, ClusterShardHealthTests.randomShardHealth(0)); } else { - map = new HashMap<>(instance.getShards()); - map.remove(map.keySet().iterator().next()); + shards.remove(shards.keySet().iterator().next()); } - return new ClusterIndexHealth( - instance.getIndex(), - instance.getNumberOfShards(), - 
instance.getNumberOfReplicas(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.getActivePrimaryShards(), - instance.getStatus(), - map - ); - default: - throw new UnsupportedOperationException(); + } + default -> throw new UnsupportedOperationException(); } + + return new ClusterIndexHealth( + index, + numberOfShards, + numberOfReplicas, + activeShards, + relocatingShards, + initializingShards, + unassignedShards, + unassignedPrimaryShards, + activePrimaryShards, + status, + shards + ); } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java index fec87a7b9634e..d5960f317f36a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.function.Predicate; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -104,111 +103,37 @@ protected Predicate getRandomFieldsExcludeFilter() { } @Override - protected ClusterShardHealth mutateInstance(final ClusterShardHealth instance) { - String mutate = randomFrom( - "shardId", - "status", - "activeShards", - "relocatingShards", - "initializingShards", - "unassignedShards", - "unassignedPrimaryShards", - "primaryActive" - ); - switch (mutate) { - case "shardId": - return new ClusterShardHealth( - instance.getShardId() + between(1, 10), - instance.getStatus(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - 
instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() - ); - case "status": - ClusterHealthStatus status = randomFrom( - Arrays.stream(ClusterHealthStatus.values()).filter(value -> value.equals(instance.getStatus()) == false).toList() - ); - return new ClusterShardHealth( - instance.getShardId(), - status, - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() - ); - case "activeShards": - return new ClusterShardHealth( - instance.getShardId(), - instance.getStatus(), - instance.getActiveShards() + between(1, 10), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() - ); - case "relocatingShards": - return new ClusterShardHealth( - instance.getShardId(), - instance.getStatus(), - instance.getActiveShards(), - instance.getRelocatingShards() + between(1, 10), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() - ); - case "initializingShards": - return new ClusterShardHealth( - instance.getShardId(), - instance.getStatus(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards() + between(1, 10), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() - ); - case "unassignedShards": - return new ClusterShardHealth( - instance.getShardId(), - instance.getStatus(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards() + between(1, 10), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() - ); - case "unassignedPrimaryShards": - return new ClusterShardHealth( - instance.getShardId(), - 
instance.getStatus(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards() + between(1, 10), - instance.isPrimaryActive() - ); - case "primaryActive": - return new ClusterShardHealth( - instance.getShardId(), - instance.getStatus(), - instance.getActiveShards(), - instance.getRelocatingShards(), - instance.getInitializingShards(), - instance.getUnassignedShards(), - instance.getUnassignedPrimaryShards(), - instance.isPrimaryActive() ? false : true - ); - default: - throw new UnsupportedOperationException(); + protected ClusterShardHealth mutateInstance(ClusterShardHealth instance) { + int shardId = instance.getShardId(); + ClusterHealthStatus status = instance.getStatus(); + int activeShards = instance.getActiveShards(); + int relocatingShards = instance.getRelocatingShards(); + int initializingShards = instance.getInitializingShards(); + int unassignedShards = instance.getUnassignedShards(); + int unassignedPrimaryShards = instance.getUnassignedPrimaryShards(); + boolean primaryActive = instance.isPrimaryActive(); + + switch (randomIntBetween(0, 7)) { + case 0 -> shardId += between(1, 10); + case 1 -> status = randomValueOtherThan(status, () -> randomFrom(ClusterHealthStatus.values())); + case 2 -> activeShards += between(1, 10); + case 3 -> relocatingShards += between(1, 10); + case 4 -> initializingShards += between(1, 10); + case 5 -> unassignedShards += between(1, 10); + case 6 -> unassignedPrimaryShards += between(1, 10); + case 7 -> primaryActive = primaryActive ? 
false : true; + default -> throw new UnsupportedOperationException(); } + + return new ClusterShardHealth( + shardId, + status, + activeShards, + relocatingShards, + initializingShards, + unassignedShards, + unassignedPrimaryShards, + primaryActive + ); } } From 069a4d49a57194eff2f112395bf73ad90d8fef61 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 2024 12:51:17 +0200 Subject: [PATCH 037/115] Save needlessly collecting list in InternalMappedSignificantTerms (#112502) No need to allocate a list+stream to do an unsafe(ish) cast. --- .../bucket/terms/InternalMappedSignificantTerms.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java index c0d7103e42e8e..0b09ad3d3d85e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java @@ -71,8 +71,9 @@ protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { } @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) public Iterator iterator() { - return buckets.stream().map(bucket -> (SignificantTerms.Bucket) bucket).toList().iterator(); + return (Iterator) buckets.iterator(); } @Override From 2982fc61e81fa23ed03f7b51854af9f5352666bb Mon Sep 17 00:00:00 2001 From: Dai Sugimori Date: Wed, 4 Sep 2024 19:55:56 +0900 Subject: [PATCH 038/115] [DOCS] Add docs for new Lucene's filters for Japanese text. 
(#112356) --- docs/plugins/analysis-kuromoji.asciidoc | 120 ++++++++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 1f114e9ad9ed6..b1d1d5a751057 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -624,3 +624,123 @@ Which results in: } ] } -------------------------------------------------- + +[[analysis-kuromoji-hiragana-uppercase]] +==== `hiragana_uppercase` token filter + +The `hiragana_uppercase` token filter normalizes small letters (捨て仮名) in hiragana into standard letters. +This filter is useful if you want to search against old style Japanese text such as +patents, legal documents, contract policies, etc. + +For example: + +[source,console] +-------------------------------------------------- +PUT kuromoji_sample +{ + "settings": { + "index": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "kuromoji_tokenizer", + "filter": [ + "hiragana_uppercase" + ] + } + } + } + } + } +} + +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "ちょっとまって" +} +-------------------------------------------------- + +Which results in: + +[source,console-result] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "ちよつと", + "start_offset": 0, + "end_offset": 4, + "type": "word", + "position": 0 + }, + { + "token": "まつ", + "start_offset": 4, + "end_offset": 6, + "type": "word", + "position": 1 + }, + { + "token": "て", + "start_offset": 6, + "end_offset": 7, + "type": "word", + "position": 2 + } + ] +} +-------------------------------------------------- + +[[analysis-kuromoji-katakana-uppercase]] +==== `katakana_uppercase` token filter + +The `katakana_uppercase` token filter normalizes small letters (捨て仮名) in katakana into standard letters. 
+This filter is useful if you want to search against old style Japanese text such as +patents, legal documents, contract policies, etc. + +For example: + +[source,console] +-------------------------------------------------- +PUT kuromoji_sample +{ + "settings": { + "index": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "kuromoji_tokenizer", + "filter": [ + "katakana_uppercase" + ] + } + } + } + } + } +} + +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "ストップウォッチ" +} +-------------------------------------------------- + +Which results in: + +[source,console-result] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "ストツプウオツチ", + "start_offset": 0, + "end_offset": 8, + "type": "word", + "position": 0 + } + ] +} +-------------------------------------------------- From 0074c14bfa438579f1dfc39d89d9cce1d1543c62 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 4 Sep 2024 12:19:29 +0100 Subject: [PATCH 039/115] Add known issue for role mappings file bug to 8.15.0 (#112504) --- docs/reference/release-notes/8.15.0.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index 2069c1bd96ff0..bed1912fc1b84 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -26,6 +26,10 @@ memory lock feature (issue: {es-issue}111847[#111847]) <> assume that this value will be within a particular range (e.g. that it fits into a 32-bit signed integer) may encounter errors (issue: {es-issue}111854[#111854]) +* Elasticsearch will not start if custom role mappings are configured using the +`xpack.security.authc.realms.*.files.role_mapping` configuration option. 
As a workaround, custom role mappings +can be configured using the https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html[REST API] (issue: {es-issue}112503[#112503]) + [[breaking-8.15.0]] [float] === Breaking changes From a36d90cf34d5d190e8a0830c99cc04be66da0af8 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 4 Sep 2024 13:42:40 +0100 Subject: [PATCH 040/115] Use CLDR locale provider on JDK 23+ (#110222) JDK 23 removes the COMPAT locale provider, leaving CLDR as the only option. This commit configures Elasticsearch to use the CLDR provider when on JDK 23, but still use the existing COMPAT provider when on JDK 22 and below. This causes some differences in locale behaviour; this also adapts various tests to still work whether run on COMPAT or CLDR. --- .../src/main/groovy/elasticsearch.ide.gradle | 2 +- .../internal/ElasticsearchTestBasePlugin.java | 2 +- .../server/cli/SystemJvmOptions.java | 17 +++++++++++----- .../painless-field-context.asciidoc | 6 +++--- .../painless-execute-script.asciidoc | 2 +- .../bucket/composite-aggregation.asciidoc | 2 +- .../bucket/datehistogram-aggregation.asciidoc | 4 ++-- .../indices/index-templates.asciidoc | 2 +- docs/reference/mapping/runtime.asciidoc | 6 +++--- docs/reference/ml/ml-shared.asciidoc | 2 +- .../search-your-data/search-api.asciidoc | 2 +- .../test/painless/65_runtime_doc_values.yml | 20 +++++++++---------- .../test/runtime_fields/10_keyword.yml | 6 +++--- .../13_keyword_calculated_at_index.yml | 4 ++-- .../runtime_fields/40_runtime_mappings.yml | 4 ++-- .../runtime_fields/80_multiple_indices.yml | 2 +- muted-tests.yml | 15 -------------- .../rest-api-spec/test/eql/10_basic.yml | 2 +- .../test/eql/20_runtime_mappings.yml | 2 +- .../search/180_locale_dependent_mapping.yml | 10 +++++----- .../search/query/SearchQueryIT.java | 16 +++++---------- ...vedComposableIndexTemplateActionTests.java | 4 ++-- .../common/time/DateFormattersTests.java | 10 +++++----- 
.../index/mapper/DateFieldMapperTests.java | 4 ++-- .../elasticsearch/license/LicenseUtils.java | 2 +- .../ClusterStateLicenseServiceTests.java | 5 ----- .../rest-api-spec/test/eql/10_basic.yml | 2 +- .../test/eql/20_runtime_mappings.yml | 2 +- .../DateTimeFormatProcessorTests.java | 6 +++--- .../xpack/security/PermissionsIT.java | 2 +- 30 files changed, 73 insertions(+), 92 deletions(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index cae1116d37ea5..dd8b582adb92f 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -167,7 +167,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { vmParameters = [ '-ea', '-Djava.security.manager=allow', - '-Djava.locale.providers=SPI,COMPAT', + '-Djava.locale.providers=SPI,CLDR', '-Des.nativelibs.path="' + testLibraryPath + '"', // TODO: only open these for mockito when it is modularized '--add-opens=java.base/java.security.cert=ALL-UNNAMED', diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 689c8ddecb057..2d6964c041fe2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -92,7 +92,7 @@ public void execute(Task t) { mkdirs(test.getWorkingDir().toPath().resolve("temp").toFile()); // TODO remove once jvm.options are added to test system properties - test.systemProperty("java.locale.providers", "SPI,COMPAT"); + test.systemProperty("java.locale.providers", "SPI,CLDR"); } }); test.getJvmArgumentProviders().add(nonInputProperties); diff --git 
a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 94e2d538c0ad0..2d707f150cc8b 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV9; import java.util.List; import java.util.Map; @@ -59,11 +60,7 @@ static List systemJvmOptions(Settings nodeSettings, final Map b.field("type", "date").field("format", "E, d MMM yyyy HH:mm:ss Z").field("locale", "de")) + fieldMapping(b -> b.field("type", "date").field("format", "E, d MMM yyyy HH:mm:ss Z").field("locale", "fr")) ); - mapper.parse(source(b -> b.field("field", "Mi, 06 Dez 2000 02:55:00 -0800"))); + mapper.parse(source(b -> b.field("field", "mer., 6 déc. 
2000 02:55:00 -0800"))); } public void testNullValue() throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index 42f0ddb3f5234..b27c1bb9d449c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -25,7 +25,7 @@ public class LicenseUtils { public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; - public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("EEEE, MMMM dd, yyyy"); + public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("EEEE, MMMM dd, yyyy").withLocale(Locale.ENGLISH); /** * Exception to be thrown when a feature action requires a valid license, but license diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java index c0c7c5c59d24b..aaadecef6021c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java @@ -65,11 +65,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -/** - * Due to changes in JDK9 where locale data is used from CLDR, the licence message will differ in jdk 8 and jdk9+ - * https://openjdk.java.net/jeps/252 - * We run ES with -Djava.locale.providers=SPI,COMPAT and same option has to be applied when running this test from IDE - */ public class ClusterStateLicenseServiceTests extends ESTestCase { // must use member mock for generic diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml 
b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml index dc9fd1e475cc9..e49264d76d5e9 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml @@ -13,7 +13,7 @@ setup: day_of_week: type: keyword script: - source: "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))" + source: "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" - do: bulk: refresh: true diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml index 58462786f9a2f..1c1a39a7bc1ac 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml @@ -9,7 +9,7 @@ setup: day_of_week: type: keyword script: - source: "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))" + source: "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" - do: bulk: refresh: true diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessorTests.java index 997447c525e43..5294fa05dfdea 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessorTests.java @@ -157,12 +157,12 @@ public void 
testFormatting() { new DateTimeFormat(Source.EMPTY, dateTime, l("YYYY-MM-dd HH:mm:ss.SSSSSSSS"), zoneId).makePipe().asProcessor().process(null) ); assertEquals("+1000", new DateTimeFormat(Source.EMPTY, dateTime, l("Z"), zoneId).makePipe().asProcessor().process(null)); - assertEquals("Etc/GMT-10", new DateTimeFormat(Source.EMPTY, dateTime, l("z"), zoneId).makePipe().asProcessor().process(null)); + assertEquals("GMT+10:00", new DateTimeFormat(Source.EMPTY, dateTime, l("z"), zoneId).makePipe().asProcessor().process(null)); assertEquals("Etc/GMT-10", new DateTimeFormat(Source.EMPTY, dateTime, l("VV"), zoneId).makePipe().asProcessor().process(null)); zoneId = ZoneId.of("America/Sao_Paulo"); assertEquals("-0300", new DateTimeFormat(Source.EMPTY, dateTime, l("Z"), zoneId).makePipe().asProcessor().process(null)); - assertEquals("BRT", new DateTimeFormat(Source.EMPTY, dateTime, l("z"), zoneId).makePipe().asProcessor().process(null)); + assertEquals("GMT-03:00", new DateTimeFormat(Source.EMPTY, dateTime, l("z"), zoneId).makePipe().asProcessor().process(null)); assertEquals( "America/Sao_Paulo", new DateTimeFormat(Source.EMPTY, dateTime, l("VV"), zoneId).makePipe().asProcessor().process(null) @@ -208,7 +208,7 @@ public void testFormatting() { ); assertEquals("Z", new Format(Source.EMPTY, dateTime, l("Z"), zoneId).makePipe().asProcessor().process(null)); assertEquals("+10", new Format(Source.EMPTY, dateTime, l("z"), zoneId).makePipe().asProcessor().process(null)); - assertEquals("Etc/GMT-10", new Format(Source.EMPTY, dateTime, l("K"), zoneId).makePipe().asProcessor().process(null)); + assertEquals("GMT+10:00", new Format(Source.EMPTY, dateTime, l("K"), zoneId).makePipe().asProcessor().process(null)); assertEquals("1", new Format(Source.EMPTY, dateTime, l("F"), zoneId).makePipe().asProcessor().process(null)); assertEquals("12", new Format(Source.EMPTY, dateTime, l("FF"), zoneId).makePipe().asProcessor().process(null)); diff --git 
a/x-pack/qa/runtime-fields/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/qa/runtime-fields/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java index e938e61734ba6..a76317d42e196 100644 --- a/x-pack/qa/runtime-fields/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/qa/runtime-fields/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -290,7 +290,7 @@ public void testPainlessExecuteWithIndexRequiresReadPrivileges() throws IOExcept painlessExecute.setJsonEntity(""" { "script": { - "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT));" + "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH));" }, "context": "keyword_field", "context_setup": { From d41885347fe872ae832369b368d4d3c33b6e676e Mon Sep 17 00:00:00 2001 From: Mike Barretta Date: Wed, 4 Sep 2024 09:34:01 -0400 Subject: [PATCH 041/115] Missing link added to fips-140-compliance.asciidoc (#112477) (#112516) completed a missing link to the support matrix --- docs/reference/security/fips-140-compliance.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/security/fips-140-compliance.asciidoc b/docs/reference/security/fips-140-compliance.asciidoc index bf880213c2073..5bf73d43541d6 100644 --- a/docs/reference/security/fips-140-compliance.asciidoc +++ b/docs/reference/security/fips-140-compliance.asciidoc @@ -55,7 +55,8 @@ so that the JVM uses FIPS validated implementations of NIST recommended cryptogr Elasticsearch has been tested with Bouncy Castle's https://repo1.maven.org/maven2/org/bouncycastle/bc-fips/1.0.2.4/bc-fips-1.0.2.4.jar[bc-fips 1.0.2.4] and https://repo1.maven.org/maven2/org/bouncycastle/bctls-fips/1.0.17/bctls-fips-1.0.17.jar[bctls-fips 1.0.17]. 
-Please refer to the [Support Matrix] for details on which combinations of JVM and security provider are supported in FIPS mode. Elasticsearch does not ship with a FIPS certified provider. It is the responsibility of the user +Please refer to the {es} +https://www.elastic.co/support/matrix#matrix_jvm[JVM support matrix] for details on which combinations of JVM and security provider are supported in FIPS mode. Elasticsearch does not ship with a FIPS certified provider. It is the responsibility of the user to install and configure the security provider to ensure compliance with FIPS 140-2. Using a FIPS certified provider will ensure that only approved cryptographic algorithms are used. From d5bae2cdee5d1874070dd15cd641379edc6594a2 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 4 Sep 2024 18:12:19 +0300 Subject: [PATCH 042/115] Control storing array source with index setting (#112397) Introduce an index setting that forces storing the source of leaf field and object arrays in synthetic source mode. Nested objects are excluded as they already preserve ordering in synthetic source. Next step is to introduce override params at the mapper level that will allow disabling the source, or storing the source for arrays (if not enabled at index level), or storing the source for both arrays and singletons. This will happen in follow-up changes, so that we can benchmark the impact of this change in parallel. 
Related to #112012 --- docs/changelog/112397.yaml | 5 + ...ogsIndexModeRandomDataChallengeRestIT.java | 11 + .../indices.create/20_synthetic_source.yml | 398 ---------- .../21_synthetic_source_stored.yml | 732 ++++++++++++++++++ .../common/settings/IndexScopedSettings.java | 1 + .../elasticsearch/index/IndexSettings.java | 12 + .../index/mapper/DocumentParser.java | 11 +- .../index/mapper/DocumentParserContext.java | 4 + .../elasticsearch/index/mapper/Mapper.java | 53 ++ .../index/mapper/MapperFeatures.java | 3 +- .../mapper/IgnoredSourceFieldMapperTests.java | 110 +++ 11 files changed, 938 insertions(+), 402 deletions(-) create mode 100644 docs/changelog/112397.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml diff --git a/docs/changelog/112397.yaml b/docs/changelog/112397.yaml new file mode 100644 index 0000000000000..e67478ec69b1c --- /dev/null +++ b/docs/changelog/112397.yaml @@ -0,0 +1,5 @@ +pr: 112397 +summary: Control storing array source with index setting +area: Mapping +type: enhancement +issues: [] diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 8bd62480f333d..ad4302cb04b44 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -8,9 +8,11 @@ package org.elasticsearch.datastreams.logsdb.qa; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; import 
org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.logsdb.datageneration.DataGenerator; import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; @@ -36,12 +38,14 @@ */ public class StandardVersusLogsIndexModeRandomDataChallengeRestIT extends StandardVersusLogsIndexModeChallengeRestIT { private final ObjectMapper.Subobjects subobjects; + private final boolean keepArraySource; private final DataGenerator dataGenerator; public StandardVersusLogsIndexModeRandomDataChallengeRestIT() { super(); this.subobjects = randomFrom(ObjectMapper.Subobjects.values()); + this.keepArraySource = randomBoolean(); var specificationBuilder = DataGeneratorSpecification.builder().withFullyDynamicMapping(randomBoolean()); if (subobjects != ObjectMapper.Subobjects.ENABLED) { @@ -120,6 +124,13 @@ public void contenderMappings(XContentBuilder builder) throws IOException { } } + @Override + public void contenderSettings(Settings.Builder builder) { + if (keepArraySource) { + builder.put(Mapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING.getKey(), "arrays"); + } + } + @Override protected XContentBuilder generateDocument(final Instant timestamp) throws IOException { var document = XContentFactory.jsonBuilder(); diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index fa08efe402b43..265aec75dc9c2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -446,260 +446,6 @@ mixed disabled and enabled objects: - match: { hits.hits.0._source.path.to.bad.value: false } ---- -object array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: 
requires tracking ignored source - - - do: - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - id: - type: integer - regular: - properties: - span: - properties: - id: - type: keyword - trace: - properties: - id: - type: keyword - stored: - store_array_source: true - properties: - span: - properties: - id: - type: keyword - trace: - properties: - id: - type: keyword - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "id": 1, "regular": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' - - '{ "create": { } }' - - '{ "id": 2, "stored": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' - - - do: - search: - index: test - sort: id - - - length: { hits.hits.0._source.regular: 2 } - - match: { hits.hits.0._source.regular.span.id: "1" } - - match: { hits.hits.0._source.regular.trace.id: [ "a", "b" ] } - - - length: { hits.hits.1._source.stored: 2 } - - match: { hits.hits.1._source.stored.0.trace.id: a } - - match: { hits.hits.1._source.stored.0.span.id: "1" } - - match: { hits.hits.1._source.stored.1.trace.id: b } - - match: { hits.hits.1._source.stored.1.span.id: "1" } - - ---- -object array within array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - - do: - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - stored: - store_array_source: true - properties: - path: - store_array_source: true - properties: - to: - properties: - trace: - type: keyword - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "stored": [ { "path": [{ "to": { "trace": "A" } }, { "to": { "trace": "B" } } ] }, { "path": { "to": { "trace": "C" } } } ] }' - - - do: - search: - index: test - - - length: { hits.hits.0._source.stored: 2 } - - match: { 
hits.hits.0._source.stored.0.path.0.to.trace: A } - - match: { hits.hits.0._source.stored.0.path.1.to.trace: B } - - match: { hits.hits.0._source.stored.1.path.to.trace: C } - - ---- -no object array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - - do: - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - stored: - store_array_source: true - properties: - span: - properties: - id: - type: keyword - trace: - properties: - id: - type: keyword - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "stored": { "trace": { "id": "a" }, "span": { "id": "b" } } }' - - - do: - search: - index: test - - - match: { hits.hits.0._source.stored.trace.id: a } - - match: { hits.hits.0._source.stored.span.id: b } - - ---- -field ordering in object array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - - do: - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - a: - type: keyword - b: - store_array_source: true - properties: - aa: - type: keyword - bb: - type: keyword - c: - type: keyword - d: - store_array_source: true - properties: - aa: - type: keyword - bb: - type: keyword - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "c": 1, "d": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ], "a": 2, "b": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ] }' - - - do: - search: - index: test - - - length: { hits.hits.0._source: 4 } - - match: { hits.hits.0._source: { "a": "2", "b": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ], "c": "1", "d": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ] } } - - ---- -nested object array next to other fields: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - - do: - indices.create: - index: 
test - body: - mappings: - _source: - mode: synthetic - properties: - a: - type: keyword - b: - properties: - c: - store_array_source: true - properties: - aa: - type: keyword - bb: - type: keyword - d: - properties: - aa: - type: keyword - bb: - type: keyword - e: - type: keyword - f: - type: keyword - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "a": 1, "b": { "c": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ], "d": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ], "e": 1000 }, "f": 2000 }' - - - do: - search: - index: test - - - match: { hits.hits.0._source.a: "1" } - - match: { hits.hits.0._source.b.c: [{ "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 }] } - - match: { hits.hits.0._source.b.d.aa: [ "200", "300" ] } - - match: { hits.hits.0._source.b.d.bb: [ "100", "400" ] } - - match: { hits.hits.0._source.b.e: "1000" } - - match: { hits.hits.0._source.f: "2000" } - - --- object with dynamic override: - requires: @@ -1157,99 +903,6 @@ doubly nested object: - match: { hits.hits.3._source.id: 3 } ---- -nested object with stored array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - - do: - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - name: - type: keyword - nested_array_regular: - type: nested - nested_array_stored: - type: nested - store_array_source: true - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "name": "A", "nested_array_regular": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' - - '{ "create": { } }' - - '{ "name": "B", "nested_array_stored": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' - - - match: { errors: false } - - - do: - search: - index: test - sort: name - - match: { hits.total.value: 2 } - - match: { hits.hits.0._source.name: A } - - match: { 
hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100] } - - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200] } - - match: { hits.hits.1._source.name: B } - - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } - - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } - - match: { hits.hits.1._source.nested_array_stored.1.b.0.c: 20 } - - match: { hits.hits.1._source.nested_array_stored.1.b.1.c: 200 } - ---- -empty nested object sorted as a first document: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - - do: - indices.create: - index: test - body: - settings: - index: - sort.field: "name" - sort.order: "asc" - mappings: - _source: - mode: synthetic - properties: - name: - type: keyword - nested: - type: nested - - - do: - bulk: - index: test - refresh: true - body: - - '{ "create": { } }' - - '{ "name": "B", "nested": { "a": "b" } }' - - '{ "create": { } }' - - '{ "name": "A" }' - - - match: { errors: false } - - - do: - search: - index: test - sort: name - - - match: { hits.total.value: 2 } - - match: { hits.hits.0._source.name: A } - - match: { hits.hits.1._source.name: B } - - match: { hits.hits.1._source.nested.a: "b" } - --- subobjects auto: - requires: @@ -1337,54 +990,3 @@ subobjects auto: - match: { hits.hits.3._source.id: 4 } - match: { hits.hits.3._source.auto_obj.foo: 40 } - match: { hits.hits.3._source.auto_obj.foo\.bar: 400 } - ---- -# 112156 -stored field under object with store_array_source: - - requires: - cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix"] - reason: requires bug fix to be implemented - - - do: - indices.create: - index: test - body: - settings: - index: - sort.field: "name" - sort.order: "asc" - mappings: - _source: - mode: synthetic - properties: - name: - type: keyword - obj: - store_array_source: true - properties: - foo: - type: keyword - store: true - - - do: - bulk: - index: test - refresh: 
true - body: - - '{ "create": { } }' - - '{ "name": "B", "obj": null }' - - '{ "create": { } }' - - '{ "name": "A", "obj": [ { "foo": "hello_from_the_other_side" } ] }' - - - match: { errors: false } - - - do: - search: - index: test - sort: name - - - match: { hits.total.value: 2 } - - match: { hits.hits.0._source.name: A } - - match: { hits.hits.0._source.obj: [ { "foo": "hello_from_the_other_side" } ] } - - match: { hits.hits.1._source.name: B } - - match: { hits.hits.1._source.obj: null } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml new file mode 100644 index 0000000000000..917f0540c4dd4 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -0,0 +1,732 @@ +--- +object param - object array: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + id: + type: integer + regular: + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + stored: + store_array_source: true + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "id": 1, "regular": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 2, "stored": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + + - do: + search: + index: test + sort: id + + - length: { hits.hits.0._source.regular: 2 } + - match: { hits.hits.0._source.regular.span.id: "1" } + - match: { 
hits.hits.0._source.regular.trace.id: [ "a", "b" ] } + + - length: { hits.hits.1._source.stored: 2 } + - match: { hits.hits.1._source.stored.0.trace.id: a } + - match: { hits.hits.1._source.stored.0.span.id: "1" } + - match: { hits.hits.1._source.stored.1.trace.id: b } + - match: { hits.hits.1._source.stored.1.span.id: "1" } + + +--- +object param - object array within array: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + stored: + store_array_source: true + properties: + path: + store_array_source: true + properties: + to: + properties: + trace: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "stored": [ { "path": [{ "to": { "trace": "A" } }, { "to": { "trace": "B" } } ] }, { "path": { "to": { "trace": "C" } } } ] }' + + - do: + search: + index: test + + - length: { hits.hits.0._source.stored: 2 } + - match: { hits.hits.0._source.stored.0.path.0.to.trace: A } + - match: { hits.hits.0._source.stored.0.path.1.to.trace: B } + - match: { hits.hits.0._source.stored.1.path.to.trace: C } + + +--- +object param - no object array: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + stored: + store_array_source: true + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "stored": { "trace": { "id": "a" }, "span": { "id": "b" } } }' + + - do: + search: + index: test + + - match: { hits.hits.0._source.stored.trace.id: a } + - match: { hits.hits.0._source.stored.span.id: b } + + +--- +object param - field ordering in object array: + - requires: + 
cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + a: + type: keyword + b: + store_array_source: true + properties: + aa: + type: keyword + bb: + type: keyword + c: + type: keyword + d: + store_array_source: true + properties: + aa: + type: keyword + bb: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "c": 1, "d": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ], "a": 2, "b": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ] }' + + - do: + search: + index: test + + - length: { hits.hits.0._source: 4 } + - match: { hits.hits.0._source: { "a": "2", "b": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ], "c": "1", "d": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ] } } + + +--- +object param - nested object array next to other fields: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + a: + type: keyword + b: + properties: + c: + store_array_source: true + properties: + aa: + type: keyword + bb: + type: keyword + d: + properties: + aa: + type: keyword + bb: + type: keyword + e: + type: keyword + f: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "a": 1, "b": { "c": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ], "d": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ], "e": 1000 }, "f": 2000 }' + + - do: + search: + index: test + + - match: { hits.hits.0._source.a: "1" } + - match: { hits.hits.0._source.b.c: [{ "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 }] } + - match: { hits.hits.0._source.b.d.aa: [ "200", "300" ] } + - match: { hits.hits.0._source.b.d.bb: [ "100", "400" ] } + - match: { hits.hits.0._source.b.e: "1000" } + - 
match: { hits.hits.0._source.f: "2000" } + + +--- +object param - nested object with stored array: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + nested_array_regular: + type: nested + nested_array_stored: + type: nested + store_array_source: true + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "A", "nested_array_regular": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' + - '{ "create": { } }' + - '{ "name": "B", "nested_array_stored": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100] } + - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200] } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } + - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } + - match: { hits.hits.1._source.nested_array_stored.1.b.0.c: 20 } + - match: { hits.hits.1._source.nested_array_stored.1.b.1.c: 200 } + + +--- +# 112156 +stored field under object with store_array_source: + - requires: + cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix"] + reason: requires bug fix to be implemented + + - do: + indices.create: + index: test + body: + settings: + index: + sort.field: "name" + sort.order: "asc" + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + obj: + store_array_source: true + properties: + foo: + type: keyword + store: true + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ 
"name": "B", "obj": null }' + - '{ "create": { } }' + - '{ "name": "A", "obj": [ { "foo": "hello_from_the_other_side" } ] }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.obj: [ { "foo": "hello_from_the_other_side" } ] } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.obj: null } + + +--- +index param - root arrays: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + id: + type: integer + leaf: + type: integer + obj: + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "id": 1, "leaf": [30, 20, 10], "obj": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 2, "leaf": [130, 120, 110], "obj": [ { "trace": { "id": "aa" }, "span": { "id": "2" } }, { "trace": { "id": "bb" }, "span": { "id": "2" } } ] }' + + - do: + search: + index: test + sort: id + + - match: { hits.hits.0._source.id: 1 } + - match: { hits.hits.0._source.leaf: [30, 20, 10] } + - length: { hits.hits.0._source.obj: 2 } + - match: { hits.hits.0._source.obj.0.trace.id: a } + - match: { hits.hits.0._source.obj.0.span.id: "1" } + - match: { hits.hits.0._source.obj.1.trace.id: b } + - match: { hits.hits.0._source.obj.1.span.id: "1" } + + - match: { hits.hits.1._source.id: 2 } + - match: { hits.hits.1._source.leaf: [ 130, 120, 110 ] } + - length: { hits.hits.1._source.obj: 2 } + - match: { hits.hits.1._source.obj.0.trace.id: aa } + - match: { hits.hits.1._source.obj.0.span.id: "2" } + - 
match: { hits.hits.1._source.obj.1.trace.id: bb } + - match: { hits.hits.1._source.obj.1.span.id: "2" } + + +--- +index param - dynamic root arrays: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + id: + type: integer + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "id": 1, "leaf": [30, 20, 10], "obj": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 2, "leaf": [130, 120, 110], "obj": [ { "trace": { "id": "aa" }, "span": { "id": "2" } }, { "trace": { "id": "bb" }, "span": { "id": "2" } } ] }' + + - do: + search: + index: test + sort: id + + - match: { hits.hits.0._source.id: 1 } + - match: { hits.hits.0._source.leaf: [30, 20, 10] } + - length: { hits.hits.0._source.obj: 2 } + - match: { hits.hits.0._source.obj.0.trace.id: a } + - match: { hits.hits.0._source.obj.0.span.id: "1" } + - match: { hits.hits.0._source.obj.1.trace.id: b } + - match: { hits.hits.0._source.obj.1.span.id: "1" } + + - match: { hits.hits.1._source.id: 2 } + - match: { hits.hits.1._source.leaf: [ 130, 120, 110 ] } + - length: { hits.hits.1._source.obj: 2 } + - match: { hits.hits.1._source.obj.0.trace.id: aa } + - match: { hits.hits.1._source.obj.0.span.id: "2" } + - match: { hits.hits.1._source.obj.1.trace.id: bb } + - match: { hits.hits.1._source.obj.1.span.id: "2" } + + +--- +index param - object array within array: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + stored: + properties: + path: + 
properties: + to: + properties: + trace: + type: keyword + values: + type: integer + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "stored": [ { "path": [{ "to": { "trace": "A", "values": [2, 1] } }, { "to": { "trace": "B", "values": [2, 1] } } ] }, { "path": { "to": { "trace": "C", "values": 3 } } } ] }' + + - do: + search: + index: test + + - length: { hits.hits.0._source.stored: 2 } + - match: { hits.hits.0._source.stored.0.path.0.to.trace: A } + - match: { hits.hits.0._source.stored.0.path.0.to.values: [2, 1] } + - match: { hits.hits.0._source.stored.0.path.1.to.trace: B } + - match: { hits.hits.0._source.stored.0.path.1.to.values: [2, 1] } + - match: { hits.hits.0._source.stored.1.path.to.trace: C } + - match: { hits.hits.0._source.stored.1.path.to.values: 3 } + + +--- +index param - no object array: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + stored: + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "stored": { "trace": { "id": "a" }, "span": { "id": "b" } } }' + + - do: + search: + index: test + + - match: { hits.hits.0._source.stored.trace.id: a } + - match: { hits.hits.0._source.stored.span.id: b } + + +--- +index param - field ordering: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + a: + type: keyword + b: + properties: + aa: + type: keyword + bb: + type: keyword + c: + type: keyword + d: + properties: + aa: + type: 
keyword + bb: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "c": [30, 20, 10], "d": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ], "a": 2, "b": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ] }' + + - do: + search: + index: test + + - length: { hits.hits.0._source: 4 } + - match: { hits.hits.0._source: { "a": "2", "b": [ { "bb": 100, "aa": 200 }, { "aa": 300, "bb": 400 } ], "c": [30, 20, 10], "d": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ] } } + + +--- +index param - nested arrays: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + a: + type: keyword + b: + properties: + c: + properties: + aa: + type: keyword + bb: + type: keyword + d: + type: integer + e: + type: keyword + f: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "a": 1, "b": { "c": [ { "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 } ], "d": [ 300, 200, 100 ], "e": 1000 }, "f": 2000 }' + - '{ "create": { } }' + - '{ "a": 11, "b": { "c": [ { "bb": 110, "aa": 120 }, { "aa": 130, "bb": 140 } ], "d": [ 1300, 1200, 1100 ], "e": 11000 }, "f": 12000 }' + + + - do: + search: + index: test + sort: a + + - match: { hits.hits.0._source.a: "1" } + - match: { hits.hits.0._source.b.c: [{ "bb": 10, "aa": 20 }, { "aa": 30, "bb": 40 }] } + - match: { hits.hits.0._source.b.d: [ 300, 200, 100 ] } + - match: { hits.hits.0._source.b.e: "1000" } + - match: { hits.hits.0._source.f: "2000" } + + - match: { hits.hits.1._source.a: "11" } + - match: { hits.hits.1._source.b.c: [ { "bb": 110, "aa": 120 }, { "aa": 130, "bb": 140 } ] } + - match: { hits.hits.1._source.b.d: [ 1300, 1200, 1100 ] } + - match: { hits.hits.1._source.b.e: "11000" } + - match: { hits.hits.1._source.f: 
"12000" } + +--- +index param - nested object with stored array: + - requires: + cluster_features: ["mapper.synthetic_source_keep"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + synthetic_source_keep: arrays + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + nested: + type: nested + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "A", "nested": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' + - '{ "create": { } }' + - '{ "name": "B", "nested": [ { "b": [ { "c": 30 }, { "c": 300 } ] }, { "b": [ { "c": 40 }, { "c": 400 } ] } ] }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.nested.0.b.0.c: 10 } + - match: { hits.hits.0._source.nested.0.b.1.c: 100 } + - match: { hits.hits.0._source.nested.1.b.0.c: 20 } + - match: { hits.hits.0._source.nested.1.b.1.c: 200 } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.nested.0.b.0.c: 30 } + - match: { hits.hits.1._source.nested.0.b.1.c: 300 } + - match: { hits.hits.1._source.nested.1.b.0.c: 40 } + - match: { hits.hits.1._source.nested.1.b.1.c: 400 } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index fe6616cb4fb8e..6adf181014023 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -181,6 +181,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING, IndexSettings.PREFER_ILM_SETTING, 
DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION_SETTING, + FieldMapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 944d50f7ea06c..509d37fd6077d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.node.Node; @@ -793,6 +794,16 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private final IndexRouting indexRouting; + /** + * The default mode for storing source, for all mappers not overriding this setting. + * This is only relevant for indexes configured with synthetic-source code. + */ + public Mapper.SourceKeepMode sourceKeepMode() { + return sourceKeepMode; + } + + private final Mapper.SourceKeepMode sourceKeepMode; + /** * Returns the default search fields for this index. 
*/ @@ -922,6 +933,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mappingFieldNameLengthLimit = scopedSettings.get(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING); mappingDimensionFieldsLimit = scopedSettings.get(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING); indexRouting = IndexRouting.fromIndexMetadata(indexMetadata); + sourceKeepMode = scopedSettings.get(Mapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING); es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING); scopedSettings.addSettingsUpdateConsumer( diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 35f0130c58706..f020b8128bb13 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -297,7 +297,7 @@ static void parseObjectOrNested(DocumentParserContext context) throws IOExceptio if (context.parent().isNested()) { // Handle a nested object that doesn't contain an array. Arrays are handled in #parseNonDynamicArray. - if (context.parent().storeArraySource() && context.mappingLookup().isSourceSynthetic() && context.getClonedSource() == false) { + if (context.parent().storeArraySource() && context.canAddIgnoredField()) { Tuple tuple = XContentDataHelper.cloneSubContext(context); context.addIgnoredField( new IgnoredSourceFieldMapper.NameValue( @@ -686,11 +686,16 @@ private static void parseNonDynamicArray( // Check if we need to record the array source. This only applies to synthetic source. 
if (context.canAddIgnoredField()) { boolean objectRequiresStoringSource = mapper instanceof ObjectMapper objectMapper - && (objectMapper.storeArraySource() || objectMapper.dynamic == ObjectMapper.Dynamic.RUNTIME); + && (objectMapper.storeArraySource() + || (context.sourceKeepModeFromIndexSettings() == Mapper.SourceKeepMode.ARRAYS + && objectMapper instanceof NestedObjectMapper == false) + || objectMapper.dynamic == ObjectMapper.Dynamic.RUNTIME); boolean fieldWithFallbackSyntheticSource = mapper instanceof FieldMapper fieldMapper && fieldMapper.syntheticSourceMode() == FieldMapper.SyntheticSourceMode.FALLBACK; + boolean fieldWithStoredArraySource = mapper instanceof FieldMapper fieldMapper + && context.sourceKeepModeFromIndexSettings() == Mapper.SourceKeepMode.ARRAYS; boolean dynamicRuntimeContext = context.dynamic() == ObjectMapper.Dynamic.RUNTIME; - if (objectRequiresStoringSource || fieldWithFallbackSyntheticSource || dynamicRuntimeContext) { + if (objectRequiresStoringSource || fieldWithFallbackSyntheticSource || dynamicRuntimeContext || fieldWithStoredArraySource) { Tuple tuple = XContentDataHelper.cloneSubContext(context); context.addIgnoredField( IgnoredSourceFieldMapper.NameValue.fromContext( diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 248369b249007..3a84162b86c27 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -329,6 +329,10 @@ public final boolean canAddIgnoredField() { return mappingLookup.isSourceSynthetic() && clonedSource == false; } + Mapper.SourceKeepMode sourceKeepModeFromIndexSettings() { + return indexSettings().sourceKeepMode(); + } + /** * Description on the document being parsed used in error messages. Not * called unless there is an error. 
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 2c1e01c3cd196..9469ee29ff0a3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -10,7 +10,9 @@ import org.apache.lucene.document.FieldType; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.StringLiteralDeduplicator; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.xcontent.ToXContentFragment; @@ -22,6 +24,57 @@ public abstract class Mapper implements ToXContentFragment, Iterable { + public static final NodeFeature SYNTHETIC_SOURCE_KEEP_FEATURE = new NodeFeature("mapper.synthetic_source_keep"); + + static final String SYNTHETIC_SOURCE_KEEP_PARAM = "synthetic_source_keep"; + + // Only relevant for synthetic source mode. + public enum SourceKeepMode { + NONE("none"), // No source recording + ARRAYS("arrays"), // Store source for arrays of mapped fields + ALL("all"); // Store source for both singletons and arrays of mapped fields + + SourceKeepMode(String name) { + this.name = name; + } + + static SourceKeepMode from(String input) { + if (input.equals(NONE.name)) { + return NONE; + } + if (input.equals(ALL.name)) { + return ALL; + } + if (input.equals(ARRAYS.name)) { + return ARRAYS; + } + throw new IllegalArgumentException("Unknown " + SYNTHETIC_SOURCE_KEEP_PARAM + " value [" + input + "]"); + } + + @Override + public String toString() { + return name; + } + + private final String name; + } + + // Only relevant for indexes configured with synthetic source mode. Otherwise, it has no effect. + // Controls the default behavior for storing the source of leaf fields and objects, in singleton or array form. 
+ // Setting to SourceKeepMode.ALL is equivalent to disabling synthetic source, so this is not allowed. + public static final Setting SYNTHETIC_SOURCE_KEEP_INDEX_SETTING = Setting.enumSetting( + SourceKeepMode.class, + "index.mapping.synthetic_source_keep", + SourceKeepMode.NONE, + value -> { + if (value == SourceKeepMode.ALL) { + throw new IllegalArgumentException("index.mapping.synthetic_source_keep can't be set to [" + value.toString() + "]"); + } + }, + Setting.Property.IndexScope, + Setting.Property.ServerlessPublic + ); + public abstract static class Builder { private String leafName; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 63bbef061c61f..2e250726b98ca 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -34,7 +34,8 @@ public Set getFeatures() { BooleanFieldMapper.BOOLEAN_DIMENSION, ObjectMapper.SUBOBJECTS_AUTO, KeywordFieldMapper.KEYWORD_NORMALIZER_SYNTHETIC_SOURCE, - SourceFieldMapper.SYNTHETIC_SOURCE_STORED_FIELDS_ADVANCE_FIX + SourceFieldMapper.SYNTHETIC_SOURCE_STORED_FIELDS_ADVANCE_FIX, + Mapper.SYNTHETIC_SOURCE_KEEP_FEATURE ); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index dcb5cd1711c8c..61c4068cedf4a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -48,6 +48,14 @@ private String getSyntheticSourceWithFieldLimit(CheckedConsumer b.field("my_value", value))); @@ -485,6 +493,108 @@ public void testMixedDisabledEnabledObjects() throws IOException { ); } + public void testIndexStoredArraySourceRootValueArray() throws IOException { 
+ DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + b.startObject("int_value").field("type", "integer").endObject(); + b.startObject("bool_value").field("type", "boolean").endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.array("int_value", new int[] { 30, 20, 10 }); + b.field("bool_value", true); + }); + assertEquals(""" + {"bool_value":true,"int_value":[30,20,10]}""", syntheticSource); + } + + public void testIndexStoredArraySourceRootObjectArray() throws IOException { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("int_value").field("type", "integer").endObject(); + } + b.endObject(); + } + b.endObject(); + b.startObject("bool_value").field("type", "boolean").endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + b.startObject().field("int_value", 10).endObject(); + b.startObject().field("int_value", 20).endObject(); + b.endArray(); + b.field("bool_value", true); + }); + assertEquals(""" + {"bool_value":true,"path":[{"int_value":10},{"int_value":20}]}""", syntheticSource); + } + + public void testIndexStoredArraySourceNestedValueArray() throws IOException { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("int_value").field("type", "integer").endObject(); + b.startObject("bool_value").field("type", "boolean").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.array("int_value", new int[] { 30, 20, 10 }); + b.field("bool_value", true); + } 
+ b.endObject(); + }); + assertEquals(""" + {"path":{"bool_value":true,"int_value":[30,20,10]}}""", syntheticSource); + } + + public void testIndexStoredArraySourceNestedObjectArray() throws IOException { + DocumentMapper documentMapper = createMapperServiceWithStoredArraySource(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("to"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("int_value").field("type", "integer").endObject(); + } + b.endObject(); + } + b.endObject(); + b.startObject("bool_value").field("type", "boolean").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startArray("to"); + b.startObject().field("int_value", 10).endObject(); + b.startObject().field("int_value", 20).endObject(); + b.endArray(); + b.field("bool_value", true); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"bool_value":true,"to":[{"int_value":10},{"int_value":20}]}}""", syntheticSource); + } + public void testRootArray() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("path"); From 9b2bad394966e028a07dff7bfcb56f909a9132e8 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 4 Sep 2024 11:27:17 -0400 Subject: [PATCH 043/115] Update the gradlew stuff in plugins/examples (#112517) --- .../gradle/wrapper/gradle-wrapper.jar | Bin 59536 -> 43583 bytes plugins/examples/gradlew | 44 ++++++++++++------ plugins/examples/gradlew.bat | 37 ++++++++------- 3 files changed, 52 insertions(+), 29 deletions(-) diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.jar b/plugins/examples/gradle/wrapper/gradle-wrapper.jar index 7454180f2ae8848c63b8b4dea2cb829da983f2fa..a4b76b9530d66f5e68d973ea569d8e19de379189 100644 GIT binary patch literal 43583 
zcma&N1CXTcmMvW9vTb(Rwr$&4wr$(C?dmSu>@vG-+vuvg^_??!{yS%8zW-#zn-LkA z5&1^$^{lnmUON?}LBF8_K|(?T0Ra(xUH{($5eN!MR#ZihR#HxkUPe+_R8Cn`RRs(P z_^*#_XlXmGv7!4;*Y%p4nw?{bNp@UZHv1?Um8r6)Fei3p@ClJn0ECfg1hkeuUU@Or zDaPa;U3fE=3L}DooL;8f;P0ipPt0Z~9P0)lbStMS)ag54=uL9ia-Lm3nh|@(Y?B`; zx_#arJIpXH!U{fbCbI^17}6Ri*H<>OLR%c|^mh8+)*h~K8Z!9)DPf zR2h?lbDZQ`p9P;&DQ4F0sur@TMa!Y}S8irn(%d-gi0*WxxCSk*A?3lGh=gcYN?FGl z7D=Js!i~0=u3rox^eO3i@$0=n{K1lPNU zwmfjRVmLOCRfe=seV&P*1Iq=^i`502keY8Uy-WNPwVNNtJFx?IwAyRPZo2Wo1+S(xF37LJZ~%i)kpFQ3Fw=mXfd@>%+)RpYQLnr}B~~zoof(JVm^^&f zxKV^+3D3$A1G;qh4gPVjhrC8e(VYUHv#dy^)(RoUFM?o%W-EHxufuWf(l*@-l+7vt z=l`qmR56K~F|v<^Pd*p~1_y^P0P^aPC##d8+HqX4IR1gu+7w#~TBFphJxF)T$2WEa zxa?H&6=Qe7d(#tha?_1uQys2KtHQ{)Qco)qwGjrdNL7thd^G5i8Os)CHqc>iOidS} z%nFEDdm=GXBw=yXe1W-ShHHFb?Cc70+$W~z_+}nAoHFYI1MV1wZegw*0y^tC*s%3h zhD3tN8b=Gv&rj}!SUM6|ajSPp*58KR7MPpI{oAJCtY~JECm)*m_x>AZEu>DFgUcby z1Qaw8lU4jZpQ_$;*7RME+gq1KySGG#Wql>aL~k9tLrSO()LWn*q&YxHEuzmwd1?aAtI zBJ>P=&$=l1efe1CDU;`Fd+_;&wI07?V0aAIgc(!{a z0Jg6Y=inXc3^n!U0Atk`iCFIQooHqcWhO(qrieUOW8X(x?(RD}iYDLMjSwffH2~tB z)oDgNBLB^AJBM1M^c5HdRx6fBfka`(LD-qrlh5jqH~);#nw|iyp)()xVYak3;Ybik z0j`(+69aK*B>)e_p%=wu8XC&9e{AO4c~O1U`5X9}?0mrd*m$_EUek{R?DNSh(=br# z#Q61gBzEpmy`$pA*6!87 zSDD+=@fTY7<4A?GLqpA?Pb2z$pbCc4B4zL{BeZ?F-8`s$?>*lXXtn*NC61>|*w7J* z$?!iB{6R-0=KFmyp1nnEmLsA-H0a6l+1uaH^g%c(p{iT&YFrbQ$&PRb8Up#X3@Zsk zD^^&LK~111%cqlP%!_gFNa^dTYT?rhkGl}5=fL{a`UViaXWI$k-UcHJwmaH1s=S$4 z%4)PdWJX;hh5UoK?6aWoyLxX&NhNRqKam7tcOkLh{%j3K^4Mgx1@i|Pi&}<^5>hs5 zm8?uOS>%)NzT(%PjVPGa?X%`N2TQCKbeH2l;cTnHiHppPSJ<7y-yEIiC!P*ikl&!B z%+?>VttCOQM@ShFguHVjxX^?mHX^hSaO_;pnyh^v9EumqSZTi+#f&_Vaija0Q-e*| z7ulQj6Fs*bbmsWp{`auM04gGwsYYdNNZcg|ph0OgD>7O}Asn7^Z=eI>`$2*v78;sj-}oMoEj&@)9+ycEOo92xSyY344^ z11Hb8^kdOvbf^GNAK++bYioknrpdN>+u8R?JxG=!2Kd9r=YWCOJYXYuM0cOq^FhEd zBg2puKy__7VT3-r*dG4c62Wgxi52EMCQ`bKgf*#*ou(D4-ZN$+mg&7$u!! 
z-^+Z%;-3IDwqZ|K=ah85OLwkO zKxNBh+4QHh)u9D?MFtpbl)us}9+V!D%w9jfAMYEb>%$A;u)rrI zuBudh;5PN}_6J_}l55P3l_)&RMlH{m!)ai-i$g)&*M`eN$XQMw{v^r@-125^RRCF0 z^2>|DxhQw(mtNEI2Kj(;KblC7x=JlK$@78`O~>V!`|1Lm-^JR$-5pUANAnb(5}B}JGjBsliK4& zk6y(;$e&h)lh2)L=bvZKbvh@>vLlreBdH8No2>$#%_Wp1U0N7Ank!6$dFSi#xzh|( zRi{Uw%-4W!{IXZ)fWx@XX6;&(m_F%c6~X8hx=BN1&q}*( zoaNjWabE{oUPb!Bt$eyd#$5j9rItB-h*5JiNi(v^e|XKAj*8(k<5-2$&ZBR5fF|JA z9&m4fbzNQnAU}r8ab>fFV%J0z5awe#UZ|bz?Ur)U9bCIKWEzi2%A+5CLqh?}K4JHi z4vtM;+uPsVz{Lfr;78W78gC;z*yTch~4YkLr&m-7%-xc ztw6Mh2d>_iO*$Rd8(-Cr1_V8EO1f*^@wRoSozS) zy1UoC@pruAaC8Z_7~_w4Q6n*&B0AjOmMWa;sIav&gu z|J5&|{=a@vR!~k-OjKEgPFCzcJ>#A1uL&7xTDn;{XBdeM}V=l3B8fE1--DHjSaxoSjNKEM9|U9#m2<3>n{Iuo`r3UZp;>GkT2YBNAh|b z^jTq-hJp(ebZh#Lk8hVBP%qXwv-@vbvoREX$TqRGTgEi$%_F9tZES@z8Bx}$#5eeG zk^UsLBH{bc2VBW)*EdS({yw=?qmevwi?BL6*=12k9zM5gJv1>y#ML4!)iiPzVaH9% zgSImetD@dam~e>{LvVh!phhzpW+iFvWpGT#CVE5TQ40n%F|p(sP5mXxna+Ev7PDwA zamaV4m*^~*xV+&p;W749xhb_X=$|LD;FHuB&JL5?*Y2-oIT(wYY2;73<^#46S~Gx| z^cez%V7x$81}UWqS13Gz80379Rj;6~WdiXWOSsdmzY39L;Hg3MH43o*y8ibNBBH`(av4|u;YPq%{R;IuYow<+GEsf@R?=@tT@!}?#>zIIn0CoyV!hq3mw zHj>OOjfJM3F{RG#6ujzo?y32m^tgSXf@v=J$ELdJ+=5j|=F-~hP$G&}tDZsZE?5rX ztGj`!S>)CFmdkccxM9eGIcGnS2AfK#gXwj%esuIBNJQP1WV~b~+D7PJTmWGTSDrR` zEAu4B8l>NPuhsk5a`rReSya2nfV1EK01+G!x8aBdTs3Io$u5!6n6KX%uv@DxAp3F@{4UYg4SWJtQ-W~0MDb|j-$lwVn znAm*Pl!?Ps&3wO=R115RWKb*JKoexo*)uhhHBncEDMSVa_PyA>k{Zm2(wMQ(5NM3# z)jkza|GoWEQo4^s*wE(gHz?Xsg4`}HUAcs42cM1-qq_=+=!Gk^y710j=66(cSWqUe zklbm8+zB_syQv5A2rj!Vbw8;|$@C!vfNmNV!yJIWDQ>{+2x zKjuFX`~~HKG~^6h5FntRpnnHt=D&rq0>IJ9#F0eM)Y-)GpRjiN7gkA8wvnG#K=q{q z9dBn8_~wm4J<3J_vl|9H{7q6u2A!cW{bp#r*-f{gOV^e=8S{nc1DxMHFwuM$;aVI^ zz6A*}m8N-&x8;aunp1w7_vtB*pa+OYBw=TMc6QK=mbA-|Cf* zvyh8D4LRJImooUaSb7t*fVfih<97Gf@VE0|z>NcBwBQze);Rh!k3K_sfunToZY;f2 z^HmC4KjHRVg+eKYj;PRN^|E0>Gj_zagfRbrki68I^#~6-HaHg3BUW%+clM1xQEdPYt_g<2K+z!$>*$9nQ>; zf9Bei{?zY^-e{q_*|W#2rJG`2fy@{%6u0i_VEWTq$*(ZN37|8lFFFt)nCG({r!q#9 z5VK_kkSJ3?zOH)OezMT{!YkCuSSn!K#-Rhl$uUM(bq*jY? 
zi1xbMVthJ`E>d>(f3)~fozjg^@eheMF6<)I`oeJYx4*+M&%c9VArn(OM-wp%M<-`x z7sLP1&3^%Nld9Dhm@$3f2}87!quhI@nwd@3~fZl_3LYW-B?Ia>ui`ELg z&Qfe!7m6ze=mZ`Ia9$z|ARSw|IdMpooY4YiPN8K z4B(ts3p%2i(Td=tgEHX z0UQ_>URBtG+-?0E;E7Ld^dyZ;jjw0}XZ(}-QzC6+NN=40oDb2^v!L1g9xRvE#@IBR zO!b-2N7wVfLV;mhEaXQ9XAU+>=XVA6f&T4Z-@AX!leJ8obP^P^wP0aICND?~w&NykJ#54x3_@r7IDMdRNy4Hh;h*!u(Ol(#0bJdwEo$5437-UBjQ+j=Ic>Q2z` zJNDf0yO6@mr6y1#n3)s(W|$iE_i8r@Gd@!DWDqZ7J&~gAm1#~maIGJ1sls^gxL9LLG_NhU!pTGty!TbhzQnu)I*S^54U6Yu%ZeCg`R>Q zhBv$n5j0v%O_j{QYWG!R9W?5_b&67KB$t}&e2LdMvd(PxN6Ir!H4>PNlerpBL>Zvyy!yw z-SOo8caEpDt(}|gKPBd$qND5#a5nju^O>V&;f890?yEOfkSG^HQVmEbM3Ugzu+UtH zC(INPDdraBN?P%kE;*Ae%Wto&sgw(crfZ#Qy(<4nk;S|hD3j{IQRI6Yq|f^basLY; z-HB&Je%Gg}Jt@={_C{L$!RM;$$|iD6vu#3w?v?*;&()uB|I-XqEKqZPS!reW9JkLewLb!70T7n`i!gNtb1%vN- zySZj{8-1>6E%H&=V}LM#xmt`J3XQoaD|@XygXjdZ1+P77-=;=eYpoEQ01B@L*a(uW zrZeZz?HJsw_4g0vhUgkg@VF8<-X$B8pOqCuWAl28uB|@r`19DTUQQsb^pfqB6QtiT z*`_UZ`fT}vtUY#%sq2{rchyfu*pCg;uec2$-$N_xgjZcoumE5vSI{+s@iLWoz^Mf; zuI8kDP{!XY6OP~q5}%1&L}CtfH^N<3o4L@J@zg1-mt{9L`s^z$Vgb|mr{@WiwAqKg zp#t-lhrU>F8o0s1q_9y`gQNf~Vb!F%70f}$>i7o4ho$`uciNf=xgJ>&!gSt0g;M>*x4-`U)ysFW&Vs^Vk6m%?iuWU+o&m(2Jm26Y(3%TL; zA7T)BP{WS!&xmxNw%J=$MPfn(9*^*TV;$JwRy8Zl*yUZi8jWYF>==j~&S|Xinsb%c z2?B+kpet*muEW7@AzjBA^wAJBY8i|#C{WtO_or&Nj2{=6JTTX05}|H>N2B|Wf!*3_ z7hW*j6p3TvpghEc6-wufFiY!%-GvOx*bZrhZu+7?iSrZL5q9}igiF^*R3%DE4aCHZ zqu>xS8LkW+Auv%z-<1Xs92u23R$nk@Pk}MU5!gT|c7vGlEA%G^2th&Q*zfg%-D^=f z&J_}jskj|Q;73NP4<4k*Y%pXPU2Thoqr+5uH1yEYM|VtBPW6lXaetokD0u z9qVek6Q&wk)tFbQ8(^HGf3Wp16gKmr>G;#G(HRBx?F`9AIRboK+;OfHaLJ(P>IP0w zyTbTkx_THEOs%Q&aPrxbZrJlio+hCC_HK<4%f3ZoSAyG7Dn`=X=&h@m*|UYO-4Hq0 z-Bq&+Ie!S##4A6OGoC~>ZW`Y5J)*ouaFl_e9GA*VSL!O_@xGiBw!AF}1{tB)z(w%c zS1Hmrb9OC8>0a_$BzeiN?rkPLc9%&;1CZW*4}CDDNr2gcl_3z+WC15&H1Zc2{o~i) z)LLW=WQ{?ricmC`G1GfJ0Yp4Dy~Ba;j6ZV4r{8xRs`13{dD!xXmr^Aga|C=iSmor% z8hi|pTXH)5Yf&v~exp3o+sY4B^^b*eYkkCYl*T{*=-0HniSA_1F53eCb{x~1k3*`W zr~};p1A`k{1DV9=UPnLDgz{aJH=-LQo<5%+Em!DNN252xwIf*wF_zS^!(XSm(9eoj z=*dXG&n0>)_)N5oc6v!>-bd(2ragD8O=M|wGW 
z!xJQS<)u70m&6OmrF0WSsr@I%T*c#Qo#Ha4d3COcX+9}hM5!7JIGF>7<~C(Ear^Sn zm^ZFkV6~Ula6+8S?oOROOA6$C&q&dp`>oR-2Ym3(HT@O7Sd5c~+kjrmM)YmgPH*tL zX+znN>`tv;5eOfX?h{AuX^LK~V#gPCu=)Tigtq9&?7Xh$qN|%A$?V*v=&-2F$zTUv z`C#WyIrChS5|Kgm_GeudCFf;)!WH7FI60j^0o#65o6`w*S7R@)88n$1nrgU(oU0M9 zx+EuMkC>(4j1;m6NoGqEkpJYJ?vc|B zOlwT3t&UgL!pX_P*6g36`ZXQ; z9~Cv}ANFnJGp(;ZhS(@FT;3e)0)Kp;h^x;$*xZn*k0U6-&FwI=uOGaODdrsp-!K$Ac32^c{+FhI-HkYd5v=`PGsg%6I`4d9Jy)uW0y%) zm&j^9WBAp*P8#kGJUhB!L?a%h$hJgQrx!6KCB_TRo%9{t0J7KW8!o1B!NC)VGLM5! zpZy5Jc{`r{1e(jd%jsG7k%I+m#CGS*BPA65ZVW~fLYw0dA-H_}O zrkGFL&P1PG9p2(%QiEWm6x;U-U&I#;Em$nx-_I^wtgw3xUPVVu zqSuKnx&dIT-XT+T10p;yjo1Y)z(x1fb8Dzfn8e yu?e%!_ptzGB|8GrCfu%p?(_ zQccdaaVK$5bz;*rnyK{_SQYM>;aES6Qs^lj9lEs6_J+%nIiuQC*fN;z8md>r_~Mfl zU%p5Dt_YT>gQqfr@`cR!$NWr~+`CZb%dn;WtzrAOI>P_JtsB76PYe*<%H(y>qx-`Kq!X_; z<{RpAqYhE=L1r*M)gNF3B8r(<%8mo*SR2hu zccLRZwGARt)Hlo1euqTyM>^!HK*!Q2P;4UYrysje@;(<|$&%vQekbn|0Ruu_Io(w4#%p6ld2Yp7tlA`Y$cciThP zKzNGIMPXX%&Ud0uQh!uQZz|FB`4KGD?3!ND?wQt6!n*f4EmCoJUh&b?;B{|lxs#F- z31~HQ`SF4x$&v00@(P+j1pAaj5!s`)b2RDBp*PB=2IB>oBF!*6vwr7Dp%zpAx*dPr zb@Zjq^XjN?O4QcZ*O+8>)|HlrR>oD*?WQl5ri3R#2?*W6iJ>>kH%KnnME&TT@ZzrHS$Q%LC?n|e>V+D+8D zYc4)QddFz7I8#}y#Wj6>4P%34dZH~OUDb?uP%-E zwjXM(?Sg~1!|wI(RVuxbu)-rH+O=igSho_pDCw(c6b=P zKk4ATlB?bj9+HHlh<_!&z0rx13K3ZrAR8W)!@Y}o`?a*JJsD+twZIv`W)@Y?Amu_u zz``@-e2X}27$i(2=9rvIu5uTUOVhzwu%mNazS|lZb&PT;XE2|B&W1>=B58#*!~D&) zfVmJGg8UdP*fx(>Cj^?yS^zH#o-$Q-*$SnK(ZVFkw+er=>N^7!)FtP3y~Xxnu^nzY zikgB>Nj0%;WOltWIob|}%lo?_C7<``a5hEkx&1ku$|)i>Rh6@3h*`slY=9U}(Ql_< zaNG*J8vb&@zpdhAvv`?{=zDedJ23TD&Zg__snRAH4eh~^oawdYi6A3w8<Ozh@Kw)#bdktM^GVb zrG08?0bG?|NG+w^&JvD*7LAbjED{_Zkc`3H!My>0u5Q}m!+6VokMLXxl`Mkd=g&Xx z-a>m*#G3SLlhbKB!)tnzfWOBV;u;ftU}S!NdD5+YtOjLg?X}dl>7m^gOpihrf1;PY zvll&>dIuUGs{Qnd- zwIR3oIrct8Va^Tm0t#(bJD7c$Z7DO9*7NnRZorrSm`b`cxz>OIC;jSE3DO8`hX955ui`s%||YQtt2 z5DNA&pG-V+4oI2s*x^>-$6J?p=I>C|9wZF8z;VjR??Icg?1w2v5Me+FgAeGGa8(3S z4vg*$>zC-WIVZtJ7}o9{D-7d>zCe|z#<9>CFve-OPAYsneTb^JH!Enaza#j}^mXy1 z+ULn^10+rWLF6j2>Ya@@Kq?26>AqK{A_| 
zQKb*~F1>sE*=d?A?W7N2j?L09_7n+HGi{VY;MoTGr_)G9)ot$p!-UY5zZ2Xtbm=t z@dpPSGwgH=QtIcEulQNI>S-#ifbnO5EWkI;$A|pxJd885oM+ zGZ0_0gDvG8q2xebj+fbCHYfAXuZStH2j~|d^sBAzo46(K8n59+T6rzBwK)^rfPT+B zyIFw)9YC-V^rhtK`!3jrhmW-sTmM+tPH+;nwjL#-SjQPUZ53L@A>y*rt(#M(qsiB2 zx6B)dI}6Wlsw%bJ8h|(lhkJVogQZA&n{?Vgs6gNSXzuZpEyu*xySy8ro07QZ7Vk1!3tJphN_5V7qOiyK8p z#@jcDD8nmtYi1^l8ml;AF<#IPK?!pqf9D4moYk>d99Im}Jtwj6c#+A;f)CQ*f-hZ< z=p_T86jog%!p)D&5g9taSwYi&eP z#JuEK%+NULWus;0w32-SYFku#i}d~+{Pkho&^{;RxzP&0!RCm3-9K6`>KZpnzS6?L z^H^V*s!8<>x8bomvD%rh>Zp3>Db%kyin;qtl+jAv8Oo~1g~mqGAC&Qi_wy|xEt2iz zWAJEfTV%cl2Cs<1L&DLRVVH05EDq`pH7Oh7sR`NNkL%wi}8n>IXcO40hp+J+sC!W?!krJf!GJNE8uj zg-y~Ns-<~D?yqbzVRB}G>0A^f0!^N7l=$m0OdZuqAOQqLc zX?AEGr1Ht+inZ-Qiwnl@Z0qukd__a!C*CKuGdy5#nD7VUBM^6OCpxCa2A(X;e0&V4 zM&WR8+wErQ7UIc6LY~Q9x%Sn*Tn>>P`^t&idaOEnOd(Ufw#>NoR^1QdhJ8s`h^|R_ zXX`c5*O~Xdvh%q;7L!_!ohf$NfEBmCde|#uVZvEo>OfEq%+Ns7&_f$OR9xsihRpBb z+cjk8LyDm@U{YN>+r46?nn{7Gh(;WhFw6GAxtcKD+YWV?uge>;+q#Xx4!GpRkVZYu zzsF}1)7$?%s9g9CH=Zs+B%M_)+~*j3L0&Q9u7!|+T`^O{xE6qvAP?XWv9_MrZKdo& z%IyU)$Q95AB4!#hT!_dA>4e@zjOBD*Y=XjtMm)V|+IXzjuM;(l+8aA5#Kaz_$rR6! zj>#&^DidYD$nUY(D$mH`9eb|dtV0b{S>H6FBfq>t5`;OxA4Nn{J(+XihF(stSche7$es&~N$epi&PDM_N`As;*9D^L==2Q7Z2zD+CiU(|+-kL*VG+&9!Yb3LgPy?A zm7Z&^qRG_JIxK7-FBzZI3Q<;{`DIxtc48k> zc|0dmX;Z=W$+)qE)~`yn6MdoJ4co;%!`ddy+FV538Y)j(vg}5*k(WK)KWZ3WaOG!8 z!syGn=s{H$odtpqFrT#JGM*utN7B((abXnpDM6w56nhw}OY}0TiTG1#f*VFZr+^-g zbP10`$LPq_;PvrA1XXlyx2uM^mrjTzX}w{yuLo-cOClE8MMk47T25G8M!9Z5ypOSV zAJUBGEg5L2fY)ZGJb^E34R2zJ?}Vf>{~gB!8=5Z) z9y$>5c)=;o0HeHHSuE4U)#vG&KF|I%-cF6f$~pdYJWk_dD}iOA>iA$O$+4%@>JU08 zS`ep)$XLPJ+n0_i@PkF#ri6T8?ZeAot$6JIYHm&P6EB=BiaNY|aA$W0I+nz*zkz_z zkEru!tj!QUffq%)8y0y`T&`fuus-1p>=^hnBiBqD^hXrPs`PY9tU3m0np~rISY09> z`P3s=-kt_cYcxWd{de@}TwSqg*xVhp;E9zCsnXo6z z?f&Sv^U7n4`xr=mXle94HzOdN!2kB~4=%)u&N!+2;z6UYKUDqi-s6AZ!haB;@&B`? 
z_TRX0%@suz^TRdCb?!vNJYPY8L_}&07uySH9%W^Tc&1pia6y1q#?*Drf}GjGbPjBS zbOPcUY#*$3sL2x4v_i*Y=N7E$mR}J%|GUI(>WEr+28+V z%v5{#e!UF*6~G&%;l*q*$V?&r$Pp^sE^i-0$+RH3ERUUdQ0>rAq2(2QAbG}$y{de( z>{qD~GGuOk559Y@%$?N^1ApVL_a704>8OD%8Y%8B;FCt%AoPu8*D1 zLB5X>b}Syz81pn;xnB}%0FnwazlWfUV)Z-~rZg6~b z6!9J$EcE&sEbzcy?CI~=boWA&eeIa%z(7SE^qgVLz??1Vbc1*aRvc%Mri)AJaAG!p z$X!_9Ds;Zz)f+;%s&dRcJt2==P{^j3bf0M=nJd&xwUGlUFn?H=2W(*2I2Gdu zv!gYCwM10aeus)`RIZSrCK=&oKaO_Ry~D1B5!y0R=%!i2*KfXGYX&gNv_u+n9wiR5 z*e$Zjju&ODRW3phN925%S(jL+bCHv6rZtc?!*`1TyYXT6%Ju=|X;6D@lq$8T zW{Y|e39ioPez(pBH%k)HzFITXHvnD6hw^lIoUMA;qAJ^CU?top1fo@s7xT13Fvn1H z6JWa-6+FJF#x>~+A;D~;VDs26>^oH0EI`IYT2iagy23?nyJ==i{g4%HrAf1-*v zK1)~@&(KkwR7TL}L(A@C_S0G;-GMDy=MJn2$FP5s<%wC)4jC5PXoxrQBFZ_k0P{{s@sz+gX`-!=T8rcB(=7vW}^K6oLWMmp(rwDh}b zwaGGd>yEy6fHv%jM$yJXo5oMAQ>c9j`**}F?MCry;T@47@r?&sKHgVe$MCqk#Z_3S z1GZI~nOEN*P~+UaFGnj{{Jo@16`(qVNtbU>O0Hf57-P>x8Jikp=`s8xWs^dAJ9lCQ z)GFm+=OV%AMVqVATtN@|vp61VVAHRn87}%PC^RAzJ%JngmZTasWBAWsoAqBU+8L8u z4A&Pe?fmTm0?mK-BL9t+{y7o(7jm+RpOhL9KnY#E&qu^}B6=K_dB}*VlSEiC9fn)+V=J;OnN)Ta5v66ic1rG+dGAJ1 z1%Zb_+!$=tQ~lxQrzv3x#CPb?CekEkA}0MYSgx$Jdd}q8+R=ma$|&1a#)TQ=l$1tQ z=tL9&_^vJ)Pk}EDO-va`UCT1m#Uty1{v^A3P~83_#v^ozH}6*9mIjIr;t3Uv%@VeW zGL6(CwCUp)Jq%G0bIG%?{_*Y#5IHf*5M@wPo6A{$Um++Co$wLC=J1aoG93&T7Ho}P z=mGEPP7GbvoG!uD$k(H3A$Z))+i{Hy?QHdk>3xSBXR0j!11O^mEe9RHmw!pvzv?Ua~2_l2Yh~_!s1qS`|0~0)YsbHSz8!mG)WiJE| z2f($6TQtt6L_f~ApQYQKSb=`053LgrQq7G@98#igV>y#i==-nEjQ!XNu9 z~;mE+gtj4IDDNQJ~JVk5Ux6&LCSFL!y=>79kE9=V}J7tD==Ga+IW zX)r7>VZ9dY=V&}DR))xUoV!u(Z|%3ciQi_2jl}3=$Agc(`RPb z8kEBpvY>1FGQ9W$n>Cq=DIpski};nE)`p3IUw1Oz0|wxll^)4dq3;CCY@RyJgFgc# zKouFh!`?Xuo{IMz^xi-h=StCis_M7yq$u) z?XHvw*HP0VgR+KR6wI)jEMX|ssqYvSf*_3W8zVTQzD?3>H!#>InzpSO)@SC8q*ii- z%%h}_#0{4JG;Jm`4zg};BPTGkYamx$Xo#O~lBirRY)q=5M45n{GCfV7h9qwyu1NxOMoP4)jjZMxmT|IQQh0U7C$EbnMN<3)Kk?fFHYq$d|ICu>KbY_hO zTZM+uKHe(cIZfEqyzyYSUBZa8;Fcut-GN!HSA9ius`ltNebF46ZX_BbZNU}}ZOm{M2&nANL9@0qvih15(|`S~z}m&h!u4x~(%MAO$jHRWNfuxWF#B)E&g3ghSQ9|> 
z(MFaLQj)NE0lowyjvg8z0#m6FIuKE9lDO~Glg}nSb7`~^&#(Lw{}GVOS>U)m8bF}x zVjbXljBm34Cs-yM6TVusr+3kYFjr28STT3g056y3cH5Tmge~ASxBj z%|yb>$eF;WgrcOZf569sDZOVwoo%8>XO>XQOX1OyN9I-SQgrm;U;+#3OI(zrWyow3 zk==|{lt2xrQ%FIXOTejR>;wv(Pb8u8}BUpx?yd(Abh6? zsoO3VYWkeLnF43&@*#MQ9-i-d0t*xN-UEyNKeyNMHw|A(k(_6QKO=nKMCxD(W(Yop zsRQ)QeL4X3Lxp^L%wzi2-WVSsf61dqliPUM7srDB?Wm6Lzn0&{*}|IsKQW;02(Y&| zaTKv|`U(pSzuvR6Rduu$wzK_W-Y-7>7s?G$)U}&uK;<>vU}^^ns@Z!p+9?St1s)dG zK%y6xkPyyS1$~&6v{kl?Md6gwM|>mt6Upm>oa8RLD^8T{0?HC!Z>;(Bob7el(DV6x zi`I)$&E&ngwFS@bi4^xFLAn`=fzTC;aimE^!cMI2n@Vo%Ae-ne`RF((&5y6xsjjAZ zVguVoQ?Z9uk$2ON;ersE%PU*xGO@T*;j1BO5#TuZKEf(mB7|g7pcEA=nYJ{s3vlbg zd4-DUlD{*6o%Gc^N!Nptgay>j6E5;3psI+C3Q!1ZIbeCubW%w4pq9)MSDyB{HLm|k zxv-{$$A*pS@csolri$Ge<4VZ}e~78JOL-EVyrbxKra^d{?|NnPp86!q>t<&IP07?Z z^>~IK^k#OEKgRH+LjllZXk7iA>2cfH6+(e&9ku5poo~6y{GC5>(bRK7hwjiurqAiZ zg*DmtgY}v83IjE&AbiWgMyFbaRUPZ{lYiz$U^&Zt2YjG<%m((&_JUbZcfJ22(>bi5 z!J?<7AySj0JZ&<-qXX;mcV!f~>G=sB0KnjWca4}vrtunD^1TrpfeS^4dvFr!65knK zZh`d;*VOkPs4*-9kL>$GP0`(M!j~B;#x?Ba~&s6CopvO86oM?-? 
zOw#dIRc;6A6T?B`Qp%^<U5 z19x(ywSH$_N+Io!6;e?`tWaM$`=Db!gzx|lQ${DG!zb1Zl&|{kX0y6xvO1o z220r<-oaS^^R2pEyY;=Qllqpmue|5yI~D|iI!IGt@iod{Opz@*ml^w2bNs)p`M(Io z|E;;m*Xpjd9l)4G#KaWfV(t8YUn@A;nK^#xgv=LtnArX|vWQVuw3}B${h+frU2>9^ z!l6)!Uo4`5k`<<;E(ido7M6lKTgWezNLq>U*=uz&s=cc$1%>VrAeOoUtA|T6gO4>UNqsdK=NF*8|~*sl&wI=x9-EGiq*aqV!(VVXA57 zw9*o6Ir8Lj1npUXvlevtn(_+^X5rzdR>#(}4YcB9O50q97%rW2me5_L=%ffYPUSRc z!vv?Kv>dH994Qi>U(a<0KF6NH5b16enCp+mw^Hb3Xs1^tThFpz!3QuN#}KBbww`(h z7GO)1olDqy6?T$()R7y%NYx*B0k_2IBiZ14&8|JPFxeMF{vW>HF-Vi3+ZOI=+qP}n zw(+!WcTd~4ZJX1!ZM&y!+uyt=&i!+~d(V%GjH;-NsEEv6nS1TERt|RHh!0>W4+4pp z1-*EzAM~i`+1f(VEHI8So`S`akPfPTfq*`l{Fz`hS%k#JS0cjT2mS0#QLGf=J?1`he3W*;m4)ce8*WFq1sdP=~$5RlH1EdWm|~dCvKOi4*I_96{^95p#B<(n!d?B z=o`0{t+&OMwKcxiBECznJcfH!fL(z3OvmxP#oWd48|mMjpE||zdiTBdWelj8&Qosv zZFp@&UgXuvJw5y=q6*28AtxZzo-UUpkRW%ne+Ylf!V-0+uQXBW=5S1o#6LXNtY5!I z%Rkz#(S8Pjz*P7bqB6L|M#Er{|QLae-Y{KA>`^} z@lPjeX>90X|34S-7}ZVXe{wEei1<{*e8T-Nbj8JmD4iwcE+Hg_zhkPVm#=@b$;)h6 z<<6y`nPa`f3I6`!28d@kdM{uJOgM%`EvlQ5B2bL)Sl=|y@YB3KeOzz=9cUW3clPAU z^sYc}xf9{4Oj?L5MOlYxR{+>w=vJjvbyO5}ptT(o6dR|ygO$)nVCvNGnq(6;bHlBd zl?w-|plD8spjDF03g5ip;W3Z z><0{BCq!Dw;h5~#1BuQilq*TwEu)qy50@+BE4bX28+7erX{BD4H)N+7U`AVEuREE8 z;X?~fyhF-x_sRfHIj~6f(+^@H)D=ngP;mwJjxhQUbUdzk8f94Ab%59-eRIq?ZKrwD z(BFI=)xrUlgu(b|hAysqK<}8bslmNNeD=#JW*}^~Nrswn^xw*nL@Tx!49bfJecV&KC2G4q5a!NSv)06A_5N3Y?veAz;Gv+@U3R% z)~UA8-0LvVE{}8LVDOHzp~2twReqf}ODIyXMM6=W>kL|OHcx9P%+aJGYi_Om)b!xe zF40Vntn0+VP>o<$AtP&JANjXBn7$}C@{+@3I@cqlwR2MdwGhVPxlTIcRVu@Ho-wO` z_~Or~IMG)A_`6-p)KPS@cT9mu9RGA>dVh5wY$NM9-^c@N=hcNaw4ITjm;iWSP^ZX| z)_XpaI61<+La+U&&%2a z0za$)-wZP@mwSELo#3!PGTt$uy0C(nTT@9NX*r3Ctw6J~7A(m#8fE)0RBd`TdKfAT zCf@$MAxjP`O(u9s@c0Fd@|}UQ6qp)O5Q5DPCeE6mSIh|Rj{$cAVIWsA=xPKVKxdhg zLzPZ`3CS+KIO;T}0Ip!fAUaNU>++ZJZRk@I(h<)RsJUhZ&Ru9*!4Ptn;gX^~4E8W^TSR&~3BAZc#HquXn)OW|TJ`CTahk+{qe`5+ixON^zA9IFd8)kc%*!AiLu z>`SFoZ5bW-%7}xZ>gpJcx_hpF$2l+533{gW{a7ce^B9sIdmLrI0)4yivZ^(Vh@-1q zFT!NQK$Iz^xu%|EOK=n>ug;(7J4OnS$;yWmq>A;hsD_0oAbLYhW^1Vdt9>;(JIYjf 
zdb+&f&D4@4AS?!*XpH>8egQvSVX`36jMd>$+RgI|pEg))^djhGSo&#lhS~9%NuWfX zDDH;3T*GzRT@5=7ibO>N-6_XPBYxno@mD_3I#rDD?iADxX`! zh*v8^i*JEMzyN#bGEBz7;UYXki*Xr(9xXax(_1qVW=Ml)kSuvK$coq2A(5ZGhs_pF z$*w}FbN6+QDseuB9=fdp_MTs)nQf!2SlROQ!gBJBCXD&@-VurqHj0wm@LWX-TDmS= z71M__vAok|@!qgi#H&H%Vg-((ZfxPAL8AI{x|VV!9)ZE}_l>iWk8UPTGHs*?u7RfP z5MC&=c6X;XlUzrz5q?(!eO@~* zoh2I*%J7dF!!_!vXoSIn5o|wj1#_>K*&CIn{qSaRc&iFVxt*^20ngCL;QonIS>I5^ zMw8HXm>W0PGd*}Ko)f|~dDd%;Wu_RWI_d;&2g6R3S63Uzjd7dn%Svu-OKpx*o|N>F zZg=-~qLb~VRLpv`k zWSdfHh@?dp=s_X`{yxOlxE$4iuyS;Z-x!*E6eqmEm*j2bE@=ZI0YZ5%Yj29!5+J$4h{s($nakA`xgbO8w zi=*r}PWz#lTL_DSAu1?f%-2OjD}NHXp4pXOsCW;DS@BC3h-q4_l`<))8WgzkdXg3! zs1WMt32kS2E#L0p_|x+x**TFV=gn`m9BWlzF{b%6j-odf4{7a4y4Uaef@YaeuPhU8 zHBvRqN^;$Jizy+ z=zW{E5<>2gp$pH{M@S*!sJVQU)b*J5*bX4h>5VJve#Q6ga}cQ&iL#=(u+KroWrxa%8&~p{WEUF0il=db;-$=A;&9M{Rq`ouZ5m%BHT6%st%saGsD6)fQgLN}x@d3q>FC;=f%O3Cyg=Ke@Gh`XW za@RajqOE9UB6eE=zhG%|dYS)IW)&y&Id2n7r)6p_)vlRP7NJL(x4UbhlcFXWT8?K=%s7;z?Vjts?y2+r|uk8Wt(DM*73^W%pAkZa1Jd zNoE)8FvQA>Z`eR5Z@Ig6kS5?0h;`Y&OL2D&xnnAUzQz{YSdh0k zB3exx%A2TyI)M*EM6htrxSlep!Kk(P(VP`$p0G~f$smld6W1r_Z+o?=IB@^weq>5VYsYZZR@` z&XJFxd5{|KPZmVOSxc@^%71C@;z}}WhbF9p!%yLj3j%YOlPL5s>7I3vj25 z@xmf=*z%Wb4;Va6SDk9cv|r*lhZ`(y_*M@>q;wrn)oQx%B(2A$9(74>;$zmQ!4fN; z>XurIk-7@wZys<+7XL@0Fhe-f%*=(weaQEdR9Eh6>Kl-EcI({qoZqyzziGwpg-GM#251sK_ z=3|kitS!j%;fpc@oWn65SEL73^N&t>Ix37xgs= zYG%eQDJc|rqHFia0!_sm7`@lvcv)gfy(+KXA@E{3t1DaZ$DijWAcA)E0@X?2ziJ{v z&KOYZ|DdkM{}t+@{@*6ge}m%xfjIxi%qh`=^2Rwz@w0cCvZ&Tc#UmCDbVwABrON^x zEBK43FO@weA8s7zggCOWhMvGGE`baZ62cC)VHyy!5Zbt%ieH+XN|OLbAFPZWyC6)p z4P3%8sq9HdS3=ih^0OOlqTPbKuzQ?lBEI{w^ReUO{V?@`ARsL|S*%yOS=Z%sF)>-y z(LAQdhgAcuF6LQjRYfdbD1g4o%tV4EiK&ElLB&^VZHbrV1K>tHTO{#XTo>)2UMm`2 z^t4s;vnMQgf-njU-RVBRw0P0-m#d-u`(kq7NL&2T)TjI_@iKuPAK-@oH(J8?%(e!0Ir$yG32@CGUPn5w4)+9@8c&pGx z+K3GKESI4*`tYlmMHt@br;jBWTei&(a=iYslc^c#RU3Q&sYp zSG){)V<(g7+8W!Wxeb5zJb4XE{I|&Y4UrFWr%LHkdQ;~XU zgy^dH-Z3lmY+0G~?DrC_S4@=>0oM8Isw%g(id10gWkoz2Q%7W$bFk@mIzTCcIB(K8 
zc<5h&ZzCdT=9n-D>&a8vl+=ZF*`uTvQviG_bLde*k>{^)&0o*b05x$MO3gVLUx`xZ z43j+>!u?XV)Yp@MmG%Y`+COH2?nQcMrQ%k~6#O%PeD_WvFO~Kct za4XoCM_X!c5vhRkIdV=xUB3xI2NNStK*8_Zl!cFjOvp-AY=D;5{uXj}GV{LK1~IE2 z|KffUiBaStRr;10R~K2VVtf{TzM7FaPm;Y(zQjILn+tIPSrJh&EMf6evaBKIvi42-WYU9Vhj~3< zZSM-B;E`g_o8_XTM9IzEL=9Lb^SPhe(f(-`Yh=X6O7+6ALXnTcUFpI>ekl6v)ZQeNCg2 z^H|{SKXHU*%nBQ@I3It0m^h+6tvI@FS=MYS$ZpBaG7j#V@P2ZuYySbp@hA# ze(kc;P4i_-_UDP?%<6>%tTRih6VBgScKU^BV6Aoeg6Uh(W^#J^V$Xo^4#Ekp ztqQVK^g9gKMTHvV7nb64UU7p~!B?>Y0oFH5T7#BSW#YfSB@5PtE~#SCCg3p^o=NkMk$<8- z6PT*yIKGrvne7+y3}_!AC8NNeI?iTY(&nakN>>U-zT0wzZf-RuyZk^X9H-DT_*wk= z;&0}6LsGtfVa1q)CEUPlx#(ED@-?H<1_FrHU#z5^P3lEB|qsxEyn%FOpjx z3S?~gvoXy~L(Q{Jh6*i~=f%9kM1>RGjBzQh_SaIDfSU_9!<>*Pm>l)cJD@wlyxpBV z4Fmhc2q=R_wHCEK69<*wG%}mgD1=FHi4h!98B-*vMu4ZGW~%IrYSLGU{^TuseqVgV zLP<%wirIL`VLyJv9XG_p8w@Q4HzNt-o;U@Au{7%Ji;53!7V8Rv0^Lu^Vf*sL>R(;c zQG_ZuFl)Mh-xEIkGu}?_(HwkB2jS;HdPLSxVU&Jxy9*XRG~^HY(f0g8Q}iqnVmgjI zfd=``2&8GsycjR?M%(zMjn;tn9agcq;&rR!Hp z$B*gzHsQ~aXw8c|a(L^LW(|`yGc!qOnV(ZjU_Q-4z1&0;jG&vAKuNG=F|H?@m5^N@ zq{E!1n;)kNTJ>|Hb2ODt-7U~-MOIFo%9I)_@7fnX+eMMNh>)V$IXesJpBn|uo8f~#aOFytCT zf9&%MCLf8mp4kwHTcojWmM3LU=#|{3L>E}SKwOd?%{HogCZ_Z1BSA}P#O(%H$;z7XyJ^sjGX;j5 zrzp>|Ud;*&VAU3x#f{CKwY7Vc{%TKKqmB@oTHA9;>?!nvMA;8+Jh=cambHz#J18x~ zs!dF>$*AnsQ{{82r5Aw&^7eRCdvcgyxH?*DV5(I$qXh^zS>us*I66_MbL8y4d3ULj z{S(ipo+T3Ag!+5`NU2sc+@*m{_X|&p#O-SAqF&g_n7ObB82~$p%fXA5GLHMC+#qqL zdt`sJC&6C2)=juQ_!NeD>U8lDVpAOkW*khf7MCcs$A(wiIl#B9HM%~GtQ^}yBPjT@ z+E=|A!Z?A(rwzZ;T}o6pOVqHzTr*i;Wrc%&36kc@jXq~+w8kVrs;%=IFdACoLAcCAmhFNpbP8;s`zG|HC2Gv?I~w4ITy=g$`0qMQdkijLSOtX6xW%Z9Nw<;M- zMN`c7=$QxN00DiSjbVt9Mi6-pjv*j(_8PyV-il8Q-&TwBwH1gz1uoxs6~uU}PrgWB zIAE_I-a1EqlIaGQNbcp@iI8W1sm9fBBNOk(k&iLBe%MCo#?xI$%ZmGA?=)M9D=0t7 zc)Q0LnI)kCy{`jCGy9lYX%mUsDWwsY`;jE(;Us@gmWPqjmXL+Hu#^;k%eT>{nMtzj zsV`Iy6leTA8-PndszF;N^X@CJrTw5IIm!GPeu)H2#FQitR{1p;MasQVAG3*+=9FYK zw*k!HT(YQorfQj+1*mCV458(T5=fH`um$gS38hw(OqVMyunQ;rW5aPbF##A3fGH6h z@W)i9Uff?qz`YbK4c}JzQpuxuE3pcQO)%xBRZp{zJ^-*|oryTxJ-rR+MXJ)!f=+pp 
z10H|DdGd2exhi+hftcYbM0_}C0ZI-2vh+$fU1acsB-YXid7O|=9L!3e@$H*6?G*Zp z%qFB(sgl=FcC=E4CYGp4CN>=M8#5r!RU!u+FJVlH6=gI5xHVD&k;Ta*M28BsxfMV~ zLz+@6TxnfLhF@5=yQo^1&S}cmTN@m!7*c6z;}~*!hNBjuE>NLVl2EwN!F+)0$R1S! zR|lF%n!9fkZ@gPW|x|B={V6x3`=jS*$Pu0+5OWf?wnIy>Y1MbbGSncpKO0qE(qO=ts z!~@&!N`10S593pVQu4FzpOh!tvg}p%zCU(aV5=~K#bKi zHdJ1>tQSrhW%KOky;iW+O_n;`l9~omqM%sdxdLtI`TrJzN6BQz+7xOl*rM>xVI2~# z)7FJ^Dc{DC<%~VS?@WXzuOG$YPLC;>#vUJ^MmtbSL`_yXtNKa$Hk+l-c!aC7gn(Cg ze?YPYZ(2Jw{SF6MiO5(%_pTo7j@&DHNW`|lD`~{iH+_eSTS&OC*2WTT*a`?|9w1dh zh1nh@$a}T#WE5$7Od~NvSEU)T(W$p$s5fe^GpG+7fdJ9=enRT9$wEk+ZaB>G3$KQO zgq?-rZZnIv!p#>Ty~}c*Lb_jxJg$eGM*XwHUwuQ|o^}b3^T6Bxx{!?va8aC@-xK*H ztJBFvFfsSWu89%@b^l3-B~O!CXs)I6Y}y#0C0U0R0WG zybjroj$io0j}3%P7zADXOwHwafT#uu*zfM!oD$6aJx7+WL%t-@6^rD_a_M?S^>c;z zMK580bZXo1f*L$CuMeM4Mp!;P@}b~$cd(s5*q~FP+NHSq;nw3fbWyH)i2)-;gQl{S zZO!T}A}fC}vUdskGSq&{`oxt~0i?0xhr6I47_tBc`fqaSrMOzR4>0H^;A zF)hX1nfHs)%Zb-(YGX;=#2R6C{BG;k=?FfP?9{_uFLri~-~AJ;jw({4MU7e*d)?P@ zXX*GkNY9ItFjhwgAIWq7Y!ksbMzfqpG)IrqKx9q{zu%Mdl+{Dis#p9q`02pr1LG8R z@As?eG!>IoROgS!@J*to<27coFc1zpkh?w=)h9CbYe%^Q!Ui46Y*HO0mr% zEff-*$ndMNw}H2a5@BsGj5oFfd!T(F&0$<{GO!Qdd?McKkorh=5{EIjDTHU`So>8V zBA-fqVLb2;u7UhDV1xMI?y>fe3~4urv3%PX)lDw+HYa;HFkaLqi4c~VtCm&Ca+9C~ zge+67hp#R9`+Euq59WhHX&7~RlXn=--m8$iZ~~1C8cv^2(qO#X0?vl91gzUKBeR1J z^p4!!&7)3#@@X&2aF2-)1Ffcc^F8r|RtdL2X%HgN&XU-KH2SLCbpw?J5xJ*!F-ypZ zMG%AJ!Pr&}`LW?E!K~=(NJxuSVTRCGJ$2a*Ao=uUDSys!OFYu!Vs2IT;xQ6EubLIl z+?+nMGeQQhh~??0!s4iQ#gm3!BpMpnY?04kK375e((Uc7B3RMj;wE?BCoQGu=UlZt!EZ1Q*auI)dj3Jj{Ujgt zW5hd~-HWBLI_3HuO) zNrb^XzPsTIb=*a69wAAA3J6AAZZ1VsYbIG}a`=d6?PjM)3EPaDpW2YP$|GrBX{q*! 
z$KBHNif)OKMBCFP5>!1d=DK>8u+Upm-{hj5o|Wn$vh1&K!lVfDB&47lw$tJ?d5|=B z^(_9=(1T3Fte)z^>|3**n}mIX;mMN5v2F#l(q*CvU{Ga`@VMp#%rQkDBy7kYbmb-q z<5!4iuB#Q_lLZ8}h|hPODI^U6`gzLJre9u3k3c#%86IKI*^H-@I48Bi*@avYm4v!n0+v zWu{M{&F8#p9cx+gF0yTB_<2QUrjMPo9*7^-uP#~gGW~y3nfPAoV%amgr>PSyVAd@l)}8#X zR5zV6t*uKJZL}?NYvPVK6J0v4iVpwiN|>+t3aYiZSp;m0!(1`bHO}TEtWR1tY%BPB z(W!0DmXbZAsT$iC13p4f>u*ZAy@JoLAkJhzFf1#4;#1deO8#8d&89}en&z!W&A3++^1(;>0SB1*54d@y&9Pn;^IAf3GiXbfT`_>{R+Xv; zQvgL>+0#8-laO!j#-WB~(I>l0NCMt_;@Gp_f0#^c)t?&#Xh1-7RR0@zPyBz!U#0Av zT?}n({(p?p7!4S2ZBw)#KdCG)uPnZe+U|0{BW!m)9 zi_9$F?m<`2!`JNFv+w8MK_K)qJ^aO@7-Ig>cM4-r0bi=>?B_2mFNJ}aE3<+QCzRr*NA!QjHw# z`1OsvcoD0?%jq{*7b!l|L1+Tw0TTAM4XMq7*ntc-Ived>Sj_ZtS|uVdpfg1_I9knY z2{GM_j5sDC7(W&}#s{jqbybqJWyn?{PW*&cQIU|*v8YGOKKlGl@?c#TCnmnAkAzV- zmK={|1G90zz=YUvC}+fMqts0d4vgA%t6Jhjv?d;(Z}(Ep8fTZfHA9``fdUHkA+z3+ zhh{ohP%Bj?T~{i0sYCQ}uC#5BwN`skI7`|c%kqkyWIQ;!ysvA8H`b-t()n6>GJj6xlYDu~8qX{AFo$Cm3d|XFL=4uvc?Keb zzb0ZmMoXca6Mob>JqkNuoP>B2Z>D`Q(TvrG6m`j}-1rGP!g|qoL=$FVQYxJQjFn33lODt3Wb1j8VR zlR++vIT6^DtYxAv_hxupbLLN3e0%A%a+hWTKDV3!Fjr^cWJ{scsAdfhpI)`Bms^M6 zQG$waKgFr=c|p9Piug=fcJvZ1ThMnNhQvBAg-8~b1?6wL*WyqXhtj^g(Ke}mEfZVM zJuLNTUVh#WsE*a6uqiz`b#9ZYg3+2%=C(6AvZGc=u&<6??!slB1a9K)=VL zY9EL^mfyKnD zSJyYBc_>G;5RRnrNgzJz#Rkn3S1`mZgO`(r5;Hw6MveN(URf_XS-r58Cn80K)ArH4 z#Rrd~LG1W&@ttw85cjp8xV&>$b%nSXH_*W}7Ch2pg$$c0BdEo-HWRTZcxngIBJad> z;C>b{jIXjb_9Jis?NZJsdm^EG}e*pR&DAy0EaSGi3XWTa(>C%tz1n$u?5Fb z1qtl?;_yjYo)(gB^iQq?=jusF%kywm?CJP~zEHi0NbZ);$(H$w(Hy@{i>$wcVRD_X|w-~(0Z9BJyh zhNh;+eQ9BEIs;tPz%jSVnfCP!3L&9YtEP;svoj_bNzeGSQIAjd zBss@A;)R^WAu-37RQrM%{DfBNRx>v!G31Z}8-El9IOJlb_MSoMu2}GDYycNaf>uny z+8xykD-7ONCM!APry_Lw6-yT>5!tR}W;W`C)1>pxSs5o1z#j7%m=&=7O4hz+Lsqm` z*>{+xsabZPr&X=}G@obTb{nPTkccJX8w3CG7X+1+t{JcMabv~UNv+G?txRqXib~c^Mo}`q{$`;EBNJ;#F*{gvS12kV?AZ%O0SFB$^ zn+}!HbmEj}w{Vq(G)OGAzH}R~kS^;(-s&=ectz8vN!_)Yl$$U@HNTI-pV`LSj7Opu zTZ5zZ)-S_{GcEQPIQXLQ#oMS`HPu{`SQiAZ)m1at*Hy%3xma|>o`h%E%8BEbi9p0r zVjcsh<{NBKQ4eKlXU|}@XJ#@uQw*$4BxKn6#W~I4T<^f99~(=}a`&3(ur8R9t+|AQ 
zWkQx7l}wa48-jO@ft2h+7qn%SJtL%~890FG0s5g*kNbL3I&@brh&f6)TlM`K^(bhr zJWM6N6x3flOw$@|C@kPi7yP&SP?bzP-E|HSXQXG>7gk|R9BTj`e=4de9C6+H7H7n# z#GJeVs1mtHhLDmVO?LkYRQc`DVOJ_vdl8VUihO-j#t=0T3%Fc1f9F73ufJz*adn*p zc%&vi(4NqHu^R>sAT_0EDjVR8bc%wTz#$;%NU-kbDyL_dg0%TFafZwZ?5KZpcuaO54Z9hX zD$u>q!-9`U6-D`E#`W~fIfiIF5_m6{fvM)b1NG3xf4Auw;Go~Fu7cth#DlUn{@~yu z=B;RT*dp?bO}o%4x7k9v{r=Y@^YQ^UUm(Qmliw8brO^=NP+UOohLYiaEB3^DB56&V zK?4jV61B|1Uj_5fBKW;8LdwOFZKWp)g{B%7g1~DgO&N& z#lisxf?R~Z@?3E$Mms$$JK8oe@X`5m98V*aV6Ua}8Xs2#A!{x?IP|N(%nxsH?^c{& z@vY&R1QmQs83BW28qAmJfS7MYi=h(YK??@EhjL-t*5W!p z^gYX!Q6-vBqcv~ruw@oMaU&qp0Fb(dbVzm5xJN%0o_^@fWq$oa3X?9s%+b)x4w-q5Koe(@j6Ez7V@~NRFvd zfBH~)U5!ix3isg`6be__wBJp=1@yfsCMw1C@y+9WYD9_C%{Q~7^0AF2KFryfLlUP# zwrtJEcH)jm48!6tUcxiurAMaiD04C&tPe6DI0#aoqz#Bt0_7_*X*TsF7u*zv(iEfA z;$@?XVu~oX#1YXtceQL{dSneL&*nDug^OW$DSLF0M1Im|sSX8R26&)<0Fbh^*l6!5wfSu8MpMoh=2l z^^0Sr$UpZp*9oqa23fcCfm7`ya2<4wzJ`Axt7e4jJrRFVf?nY~2&tRL* zd;6_njcz01c>$IvN=?K}9ie%Z(BO@JG2J}fT#BJQ+f5LFSgup7i!xWRKw6)iITjZU z%l6hPZia>R!`aZjwCp}I zg)%20;}f+&@t;(%5;RHL>K_&7MH^S+7<|(SZH!u zznW|jz$uA`P9@ZWtJgv$EFp>)K&Gt+4C6#*khZQXS*S~6N%JDT$r`aJDs9|uXWdbg zBwho$phWx}x!qy8&}6y5Vr$G{yGSE*r$^r{}pw zVTZKvikRZ`J_IJrjc=X1uw?estdwm&bEahku&D04HD+0Bm~q#YGS6gp!KLf$A{%Qd z&&yX@Hp>~(wU{|(#U&Bf92+1i&Q*-S+=y=3pSZy$#8Uc$#7oiJUuO{cE6=tsPhwPe| zxQpK>`Dbka`V)$}e6_OXKLB%i76~4N*zA?X+PrhH<&)}prET;kel24kW%+9))G^JI zsq7L{P}^#QsZViX%KgxBvEugr>ZmFqe^oAg?{EI=&_O#e)F3V#rc z8$4}0Zr19qd3tE4#$3_f=Bbx9oV6VO!d3(R===i-7p=Vj`520w0D3W6lQfY48}!D* z&)lZMG;~er2qBoI2gsX+Ts-hnpS~NYRDtPd^FPzn!^&yxRy#CSz(b&E*tL|jIkq|l zf%>)7Dtu>jCf`-7R#*GhGn4FkYf;B$+9IxmqH|lf6$4irg{0ept__%)V*R_OK=T06 zyT_m-o@Kp6U{l5h>W1hGq*X#8*y@<;vsOFqEjTQXFEotR+{3}ODDnj;o0@!bB5x=N z394FojuGOtVKBlVRLtHp%EJv_G5q=AgF)SKyRN5=cGBjDWv4LDn$IL`*=~J7u&Dy5 zrMc83y+w^F&{?X(KOOAl-sWZDb{9X9#jrQtmrEXD?;h-}SYT7yM(X_6qksM=K_a;Z z3u0qT0TtaNvDER_8x*rxXw&C^|h{P1qxK|@pS7vdlZ#P z7PdB7MmC2}%sdzAxt>;WM1s0??`1983O4nFK|hVAbHcZ3x{PzytQLkCVk7hA!Lo` zEJH?4qw|}WH{dc4z%aB=0XqsFW?^p=X}4xnCJXK%c#ItOSjdSO`UXJyuc8bh^Cf}8 
z@Ht|vXd^6{Fgai8*tmyRGmD_s_nv~r^Fy7j`Bu`6=G)5H$i7Q7lvQnmea&TGvJp9a|qOrUymZ$6G|Ly z#zOCg++$3iB$!6!>215A4!iryregKuUT344X)jQb3|9qY>c0LO{6Vby05n~VFzd?q zgGZv&FGlkiH*`fTurp>B8v&nSxNz)=5IF$=@rgND4d`!AaaX;_lK~)-U8la_Wa8i?NJC@BURO*sUW)E9oyv3RG^YGfN%BmxzjlT)bp*$<| zX3tt?EAy<&K+bhIuMs-g#=d1}N_?isY)6Ay$mDOKRh z4v1asEGWoAp=srraLW^h&_Uw|6O+r;wns=uwYm=JN4Q!quD8SQRSeEcGh|Eb5Jg8m zOT}u;N|x@aq)=&;wufCc^#)5U^VcZw;d_wwaoh9$p@Xrc{DD6GZUqZ ziC6OT^zSq@-lhbgR8B+e;7_Giv;DK5gn^$bs<6~SUadiosfewWDJu`XsBfOd1|p=q zE>m=zF}!lObA%ePey~gqU8S6h-^J2Y?>7)L2+%8kV}Gp=h`Xm_}rlm)SyUS=`=S7msKu zC|T!gPiI1rWGb1z$Md?0YJQ;%>uPLOXf1Z>N~`~JHJ!^@D5kSXQ4ugnFZ>^`zH8CAiZmp z6Ms|#2gcGsQ{{u7+Nb9sA?U>(0e$5V1|WVwY`Kn)rsnnZ4=1u=7u!4WexZD^IQ1Jk zfF#NLe>W$3m&C^ULjdw+5|)-BSHwpegdyt9NYC{3@QtMfd8GrIWDu`gd0nv-3LpGCh@wgBaG z176tikL!_NXM+Bv#7q^cyn9$XSeZR6#!B4JE@GVH zoobHZN_*RF#@_SVYKkQ_igme-Y5U}cV(hkR#k1c{bQNMji zU7aE`?dHyx=1`kOYZo_8U7?3-7vHOp`Qe%Z*i+FX!s?6huNp0iCEW-Z7E&jRWmUW_ z67j>)Ew!yq)hhG4o?^z}HWH-e=es#xJUhDRc4B51M4~E-l5VZ!&zQq`gWe`?}#b~7w1LH4Xa-UCT5LXkXQWheBa2YJYbyQ zl1pXR%b(KCXMO0OsXgl0P0Og<{(@&z1aokU-Pq`eQq*JYgt8xdFQ6S z6Z3IFSua8W&M#`~*L#r>Jfd6*BzJ?JFdBR#bDv$_0N!_5vnmo@!>vULcDm`MFU823 zpG9pqjqz^FE5zMDoGqhs5OMmC{Y3iVcl>F}5Rs24Y5B^mYQ;1T&ks@pIApHOdrzXF z-SdX}Hf{X;TaSxG_T$0~#RhqKISGKNK47}0*x&nRIPtmdwxc&QT3$8&!3fWu1eZ_P zJveQj^hJL#Sn!*4k`3}(d(aasl&7G0j0-*_2xtAnoX1@9+h zO#c>YQg60Z;o{Bi=3i7S`Ic+ZE>K{(u|#)9y}q*j8uKQ1^>+(BI}m%1v3$=4ojGBc zm+o1*!T&b}-lVvZqIUBc8V}QyFEgm#oyIuC{8WqUNV{Toz`oxhYpP!_p2oHHh5P@iB*NVo~2=GQm+8Yrkm2Xjc_VyHg1c0>+o~@>*Qzo zHVBJS>$$}$_4EniTI;b1WShX<5-p#TPB&!;lP!lBVBbLOOxh6FuYloD%m;n{r|;MU3!q4AVkua~fieeWu2 zQAQ$ue(IklX6+V;F1vCu-&V?I3d42FgWgsb_e^29ol}HYft?{SLf>DrmOp9o!t>I^ zY7fBCk+E8n_|apgM|-;^=#B?6RnFKlN`oR)`e$+;D=yO-(U^jV;rft^G_zl`n7qnM zL z*-Y4Phq+ZI1$j$F-f;`CD#|`-T~OM5Q>x}a>B~Gb3-+9i>Lfr|Ca6S^8g*{*?_5!x zH_N!SoRP=gX1?)q%>QTY!r77e2j9W(I!uAz{T`NdNmPBBUzi2{`XMB^zJGGwFWeA9 z{fk33#*9SO0)DjROug+(M)I-pKA!CX;IY(#gE!UxXVsa)X!UftIN98{pt#4MJHOhY 
zM$_l}-TJlxY?LS6Nuz1T<44m<4i^8k@D$zuCPrkmz@sdv+{ciyFJG2Zwy&%c7;atIeTdh!a(R^QXnu1Oq1b42*OQFWnyQ zWeQrdvP|w_idy53Wa<{QH^lFmEd+VlJkyiC>6B#s)F;w-{c;aKIm;Kp50HnA-o3lY z9B~F$gJ@yYE#g#X&3ADx&tO+P_@mnQTz9gv30_sTsaGXkfNYXY{$(>*PEN3QL>I!k zp)KibPhrfX3%Z$H6SY`rXGYS~143wZrG2;=FLj50+VM6soI~up_>fU(2Wl@{BRsMi zO%sL3x?2l1cXTF)k&moNsHfQrQ+wu(gBt{sk#CU=UhrvJIncy@tJX5klLjgMn>~h= zg|FR&;@eh|C7`>s_9c~0-{IAPV){l|Ts`i=)AW;d9&KPc3fMeoTS%8@V~D8*h;&(^>yjT84MM}=%#LS7shLAuuj(0VAYoozhWjq z4LEr?wUe2^WGwdTIgWBkDUJa>YP@5d9^Rs$kCXmMRxuF*YMVrn?0NFyPl}>`&dqZb z<5eqR=ZG3>n2{6v6BvJ`YBZeeTtB88TAY(x0a58EWyuf>+^|x8Qa6wA|1Nb_p|nA zWWa}|z8a)--Wj`LqyFk_a3gN2>5{Rl_wbW?#by7&i*^hRknK%jwIH6=dQ8*-_{*x0j^DUfMX0`|K@6C<|1cgZ~D(e5vBFFm;HTZF(!vT8=T$K+|F)x3kqzBV4-=p1V(lzi(s7jdu0>LD#N=$Lk#3HkG!a zIF<7>%B7sRNzJ66KrFV76J<2bdYhxll0y2^_rdG=I%AgW4~)1Nvz=$1UkE^J%BxLo z+lUci`UcU062os*=`-j4IfSQA{w@y|3}Vk?i;&SSdh8n+$iHA#%ERL{;EpXl6u&8@ zzg}?hkEOUOJt?ZL=pWZFJ19mI1@P=$U5*Im1e_8Z${JsM>Ov?nh8Z zP5QvI!{Jy@&BP48%P2{Jr_VgzW;P@7)M9n|lDT|Ep#}7C$&ud&6>C^5ZiwKIg2McPU(4jhM!BD@@L(Gd*Nu$ji(ljZ<{FIeW_1Mmf;76{LU z-ywN~=uNN)Xi6$<12A9y)K%X|(W0p|&>>4OXB?IiYr||WKDOJPxiSe01NSV-h24^L z_>m$;|C+q!Mj**-qQ$L-*++en(g|hw;M!^%_h-iDjFHLo-n3JpB;p?+o2;`*jpvJU zLY^lt)Un4joij^^)O(CKs@7E%*!w>!HA4Q?0}oBJ7Nr8NQ7QmY^4~jvf0-`%waOLn zdNjAPaC0_7c|RVhw)+71NWjRi!y>C+Bl;Z`NiL^zn2*0kmj5gyhCLCxts*cWCdRI| zjsd=sT5BVJc^$GxP~YF$-U{-?kW6r@^vHXB%{CqYzU@1>dzf#3SYedJG-Rm6^RB7s zGM5PR(yKPKR)>?~vpUIeTP7A1sc8-knnJk*9)3t^e%izbdm>Y=W{$wm(cy1RB-19i za#828DMBY+ps#7Y8^6t)=Ea@%Nkt)O6JCx|ybC;Ap}Z@Zw~*}3P>MZLPb4Enxz9Wf zssobT^(R@KuShj8>@!1M7tm|2%-pYYDxz-5`rCbaTCG5{;Uxm z*g=+H1X8{NUvFGzz~wXa%Eo};I;~`37*WrRU&K0dPSB$yk(Z*@K&+mFal^?c zurbqB-+|Kb5|sznT;?Pj!+kgFY1#Dr;_%A(GIQC{3ct|{*Bji%FNa6c-thbpBkA;U zURV!Dr&X{0J}iht#-Qp2=xzuh(fM>zRoiGrYl5ttw2#r34gC41CCOC31m~^UPTK@s z6;A@)7O7_%C)>bnAXerYuAHdE93>j2N}H${zEc6&SbZ|-fiG*-qtGuy-qDelH(|u$ zorf8_T6Zqe#Ub!+e3oSyrskt_HyW_^5lrWt#30l)tHk|j$@YyEkXUOV;6B51L;M@=NIWZXU;GrAa(LGxO%|im%7F<-6N;en0Cr zLH>l*y?pMwt`1*cH~LdBPFY_l;~`N!Clyfr;7w<^X;&(ZiVdF1S5e(+Q%60zgh)s4 
zn2yj$+mE=miVERP(g8}G4<85^-5f@qxh2ec?n+$A_`?qN=iyT1?U@t?V6DM~BIlBB z>u~eXm-aE>R0sQy!-I4xtCNi!!qh?R1!kKf6BoH2GG{L4%PAz0{Sh6xpuyI%*~u)s z%rLuFl)uQUCBQAtMyN;%)zFMx4loh7uTfKeB2Xif`lN?2gq6NhWhfz0u5WP9J>=V2 zo{mLtSy&BA!mSzs&CrKWq^y40JF5a&GSXIi2= z{EYb59J4}VwikL4P=>+mc6{($FNE@e=VUwG+KV21;<@lrN`mnz5jYGASyvz7BOG_6(p^eTxD-4O#lROgon;R35=|nj#eHIfJBYPWG>H>`dHKCDZ3`R{-?HO0mE~(5_WYcFmp8sU?wr*UkAQiNDGc6T zA%}GOLXlOWqL?WwfHO8MB#8M8*~Y*gz;1rWWoVSXP&IbKxbQ8+s%4Jnt?kDsq7btI zCDr0PZ)b;B%!lu&CT#RJzm{l{2fq|BcY85`w~3LSK<><@(2EdzFLt9Y_`;WXL6x`0 zDoQ?=?I@Hbr;*VVll1Gmd8*%tiXggMK81a+T(5Gx6;eNb8=uYn z5BG-0g>pP21NPn>$ntBh>`*})Fl|38oC^9Qz>~MAazH%3Q~Qb!ALMf$srexgPZ2@&c~+hxRi1;}+)-06)!#Mq<6GhP z-Q?qmgo${aFBApb5p}$1OJKTClfi8%PpnczyVKkoHw7Ml9e7ikrF0d~UB}i3vizos zXW4DN$SiEV9{faLt5bHy2a>33K%7Td-n5C*N;f&ZqAg#2hIqEb(y<&f4u5BWJ>2^4 z414GosL=Aom#m&=x_v<0-fp1r%oVJ{T-(xnomNJ(Dryv zh?vj+%=II_nV+@NR+(!fZZVM&(W6{6%9cm+o+Z6}KqzLw{(>E86uA1`_K$HqINlb1 zKelh3-jr2I9V?ych`{hta9wQ2c9=MM`2cC{m6^MhlL2{DLv7C^j z$xXBCnDl_;l|bPGMX@*tV)B!c|4oZyftUlP*?$YU9C_eAsuVHJ58?)zpbr30P*C`T z7y#ao`uE-SOG(Pi+`$=e^mle~)pRrdwL5)N;o{gpW21of(QE#U6w%*C~`v-z0QqBML!!5EeYA5IQB0 z^l01c;L6E(iytN!LhL}wfwP7W9PNAkb+)Cst?qg#$n;z41O4&v+8-zPs+XNb-q zIeeBCh#ivnFLUCwfS;p{LC0O7tm+Sf9Jn)~b%uwP{%69;QC)Ok0t%*a5M+=;y8j=v z#!*pp$9@!x;UMIs4~hP#pnfVc!%-D<+wsG@R2+J&%73lK|2G!EQC)O05TCV=&3g)C!lT=czLpZ@Sa%TYuoE?v8T8`V;e$#Zf2_Nj6nvBgh1)2 GZ~q4|mN%#X literal 59536 zcma&NbC71ylI~qywr$(CZQJHswz}-9F59+k+g;UV+cs{`J?GrGXYR~=-ydruB3JCa zB64N^cILAcWk5iofq)<(fq;O7{th4@;QxID0)qN`mJ?GIqLY#rX8-|G{5M0pdVW5^ zzXk$-2kQTAC?_N@B`&6-N-rmVFE=$QD?>*=4<|!MJu@}isLc4AW#{m2if&A5T5g&~ ziuMQeS*U5sL6J698wOd)K@oK@1{peP5&Esut<#VH^u)gp`9H4)`uE!2$>RTctN+^u z=ASkePDZA-X8)rp%D;p*~P?*a_=*Kwc<^>QSH|^<0>o37lt^+Mj1;4YvJ(JR-Y+?%Nu}JAYj5 z_Qc5%Ao#F?q32i?ZaN2OSNhWL;2oDEw_({7ZbgUjna!Fqn3NzLM@-EWFPZVmc>(fZ z0&bF-Ch#p9C{YJT9Rcr3+Y_uR^At1^BxZ#eo>$PLJF3=;t_$2|t+_6gg5(j{TmjYU zK12c&lE?Eh+2u2&6Gf*IdKS&6?rYbSEKBN!rv{YCm|Rt=UlPcW9j`0o6{66#y5t9C zruFA2iKd=H%jHf%ypOkxLnO8#H}#Zt{8p!oi6)7#NqoF({t6|J^?1e*oxqng9Q2Cc 
zg%5Vu!em)}Yuj?kaP!D?b?(C*w!1;>R=j90+RTkyEXz+9CufZ$C^umX^+4|JYaO<5 zmIM3#dv`DGM;@F6;(t!WngZSYzHx?9&$xEF70D1BvfVj<%+b#)vz)2iLCrTeYzUcL z(OBnNoG6Le%M+@2oo)&jdOg=iCszzv59e zDRCeaX8l1hC=8LbBt|k5?CXgep=3r9BXx1uR8!p%Z|0+4Xro=xi0G!e{c4U~1j6!) zH6adq0}#l{%*1U(Cb%4AJ}VLWKBPi0MoKFaQH6x?^hQ!6em@993xdtS%_dmevzeNl z(o?YlOI=jl(`L9^ z0O+H9k$_@`6L13eTT8ci-V0ljDMD|0ifUw|Q-Hep$xYj0hTO@0%IS^TD4b4n6EKDG z??uM;MEx`s98KYN(K0>c!C3HZdZ{+_53DO%9k5W%pr6yJusQAv_;IA}925Y%;+!tY z%2k!YQmLLOr{rF~!s<3-WEUs)`ix_mSU|cNRBIWxOox_Yb7Z=~Q45ZNe*u|m^|)d* zog=i>`=bTe!|;8F+#H>EjIMcgWcG2ORD`w0WD;YZAy5#s{65~qfI6o$+Ty&-hyMyJ z3Ra~t>R!p=5ZpxA;QkDAoPi4sYOP6>LT+}{xp}tk+<0k^CKCFdNYG(Es>p0gqD)jP zWOeX5G;9(m@?GOG7g;e74i_|SmE?`B2i;sLYwRWKLy0RLW!Hx`=!LH3&k=FuCsM=9M4|GqzA)anEHfxkB z?2iK-u(DC_T1};KaUT@3nP~LEcENT^UgPvp!QC@Dw&PVAhaEYrPey{nkcn(ro|r7XUz z%#(=$7D8uP_uU-oPHhd>>^adbCSQetgSG`e$U|7mr!`|bU0aHl_cmL)na-5x1#OsVE#m*+k84Y^+UMeSAa zbrVZHU=mFwXEaGHtXQq`2ZtjfS!B2H{5A<3(nb-6ARVV8kEmOkx6D2x7~-6hl;*-*}2Xz;J#a8Wn;_B5=m zl3dY;%krf?i-Ok^Pal-}4F`{F@TYPTwTEhxpZK5WCpfD^UmM_iYPe}wpE!Djai6_{ z*pGO=WB47#Xjb7!n2Ma)s^yeR*1rTxp`Mt4sfA+`HwZf%!7ZqGosPkw69`Ix5Ku6G z@Pa;pjzV&dn{M=QDx89t?p?d9gna*}jBly*#1!6}5K<*xDPJ{wv4& zM$17DFd~L*Te3A%yD;Dp9UGWTjRxAvMu!j^Tbc}2v~q^59d4bz zvu#!IJCy(BcWTc`;v$9tH;J%oiSJ_i7s;2`JXZF+qd4C)vY!hyCtl)sJIC{ebI*0> z@x>;EzyBv>AI-~{D6l6{ST=em*U( z(r$nuXY-#CCi^8Z2#v#UXOt`dbYN1z5jzNF2 z411?w)whZrfA20;nl&C1Gi+gk<`JSm+{|*2o<< zqM#@z_D`Cn|0H^9$|Tah)0M_X4c37|KQ*PmoT@%xHc3L1ZY6(p(sNXHa&49Frzto& zR`c~ClHpE~4Z=uKa5S(-?M8EJ$zt0&fJk~p$M#fGN1-y$7!37hld`Uw>Urri(DxLa;=#rK0g4J)pXMC zxzraOVw1+kNWpi#P=6(qxf`zSdUC?D$i`8ZI@F>k6k zz21?d+dw7b&i*>Kv5L(LH-?J%@WnqT7j#qZ9B>|Zl+=> z^U-pV@1y_ptHo4hl^cPRWewbLQ#g6XYQ@EkiP z;(=SU!yhjHp%1&MsU`FV1Z_#K1&(|5n(7IHbx&gG28HNT)*~-BQi372@|->2Aw5It z0CBpUcMA*QvsPy)#lr!lIdCi@1k4V2m!NH)%Px(vu-r(Q)HYc!p zJ^$|)j^E#q#QOgcb^pd74^JUi7fUmMiNP_o*lvx*q%_odv49Dsv$NV;6J z9GOXKomA{2Pb{w}&+yHtH?IkJJu~}Z?{Uk++2mB8zyvh*xhHKE``99>y#TdD z&(MH^^JHf;g(Tbb^&8P*;_i*2&fS$7${3WJtV7K&&(MBV2~)2KB3%cWg#1!VE~k#C 
z!;A;?p$s{ihyojEZz+$I1)L}&G~ml=udD9qh>Tu(ylv)?YcJT3ihapi!zgPtWb*CP zlLLJSRCj-^w?@;RU9aL2zDZY1`I3d<&OMuW=c3$o0#STpv_p3b9Wtbql>w^bBi~u4 z3D8KyF?YE?=HcKk!xcp@Cigvzy=lnFgc^9c%(^F22BWYNAYRSho@~*~S)4%AhEttv zvq>7X!!EWKG?mOd9&n>vvH1p4VzE?HCuxT-u+F&mnsfDI^}*-d00-KAauEaXqg3k@ zy#)MGX!X;&3&0s}F3q40ZmVM$(H3CLfpdL?hB6nVqMxX)q=1b}o_PG%r~hZ4gUfSp zOH4qlEOW4OMUc)_m)fMR_rl^pCfXc{$fQbI*E&mV77}kRF z&{<06AJyJ!e863o-V>FA1a9Eemx6>^F$~9ppt()ZbPGfg_NdRXBWoZnDy2;#ODgf! zgl?iOcF7Meo|{AF>KDwTgYrJLb$L2%%BEtO>T$C?|9bAB&}s;gI?lY#^tttY&hfr# zKhC+&b-rpg_?~uVK%S@mQleU#_xCsvIPK*<`E0fHE1&!J7!xD#IB|SSPW6-PyuqGn3^M^Rz%WT{e?OI^svARX&SAdU77V(C~ zM$H{Kg59op{<|8ry9ecfP%=kFm(-!W&?U0@<%z*+!*<e0XesMxRFu9QnGqun6R_%T+B%&9Dtk?*d$Q zb~>84jEAPi@&F@3wAa^Lzc(AJz5gsfZ7J53;@D<;Klpl?sK&u@gie`~vTsbOE~Cd4 z%kr56mI|#b(Jk&;p6plVwmNB0H@0SmgdmjIn5Ne@)}7Vty(yb2t3ev@22AE^s!KaN zyQ>j+F3w=wnx7w@FVCRe+`vUH)3gW%_72fxzqX!S&!dchdkRiHbXW1FMrIIBwjsai8`CB2r4mAbwp%rrO>3B$Zw;9=%fXI9B{d(UzVap7u z6piC-FQ)>}VOEuPpuqznpY`hN4dGa_1Xz9rVg(;H$5Te^F0dDv*gz9JS<|>>U0J^# z6)(4ICh+N_Q`Ft0hF|3fSHs*?a=XC;e`sJaU9&d>X4l?1W=|fr!5ShD|nv$GK;j46@BV6+{oRbWfqOBRb!ir88XD*SbC(LF}I1h#6@dvK%Toe%@ zhDyG$93H8Eu&gCYddP58iF3oQH*zLbNI;rN@E{T9%A8!=v#JLxKyUe}e}BJpB{~uN zqgxRgo0*-@-iaHPV8bTOH(rS(huwK1Xg0u+e!`(Irzu@Bld&s5&bWgVc@m7;JgELd zimVs`>vQ}B_1(2#rv#N9O`fJpVfPc7V2nv34PC);Dzbb;p!6pqHzvy?2pD&1NE)?A zt(t-ucqy@wn9`^MN5apa7K|L=9>ISC>xoc#>{@e}m#YAAa1*8-RUMKwbm|;5p>T`Z zNf*ph@tnF{gmDa3uwwN(g=`Rh)4!&)^oOy@VJaK4lMT&5#YbXkl`q?<*XtsqD z9PRK6bqb)fJw0g-^a@nu`^?71k|m3RPRjt;pIkCo1{*pdqbVs-Yl>4E>3fZx3Sv44grW=*qdSoiZ9?X0wWyO4`yDHh2E!9I!ZFi zVL8|VtW38}BOJHW(Ax#KL_KQzarbuE{(%TA)AY)@tY4%A%P%SqIU~8~-Lp3qY;U-} z`h_Gel7;K1h}7$_5ZZT0&%$Lxxr-<89V&&TCsu}LL#!xpQ1O31jaa{U34~^le*Y%L za?7$>Jk^k^pS^_M&cDs}NgXlR>16AHkSK-4TRaJSh#h&p!-!vQY%f+bmn6x`4fwTp z$727L^y`~!exvmE^W&#@uY!NxJi`g!i#(++!)?iJ(1)2Wk;RN zFK&O4eTkP$Xn~4bB|q8y(btx$R#D`O@epi4ofcETrx!IM(kWNEe42Qh(8*KqfP(c0 zouBl6>Fc_zM+V;F3znbo{x#%!?mH3`_ANJ?y7ppxS@glg#S9^MXu|FM&ynpz3o&Qh 
z2ujAHLF3($pH}0jXQsa#?t--TnF1P73b?4`KeJ9^qK-USHE)4!IYgMn-7z|=ALF5SNGkrtPG@Y~niUQV2?g$vzJN3nZ{7;HZHzWAeQ;5P|@Tl3YHpyznGG4-f4=XflwSJY+58-+wf?~Fg@1p1wkzuu-RF3j2JX37SQUc? zQ4v%`V8z9ZVZVqS8h|@@RpD?n0W<=hk=3Cf8R?d^9YK&e9ZybFY%jdnA)PeHvtBe- zhMLD+SSteHBq*q)d6x{)s1UrsO!byyLS$58WK;sqip$Mk{l)Y(_6hEIBsIjCr5t>( z7CdKUrJTrW%qZ#1z^n*Lb8#VdfzPw~OIL76aC+Rhr<~;4Tl!sw?Rj6hXj4XWa#6Tp z@)kJ~qOV)^Rh*-?aG>ic2*NlC2M7&LUzc9RT6WM%Cpe78`iAowe!>(T0jo&ivn8-7 zs{Qa@cGy$rE-3AY0V(l8wjI^uB8Lchj@?L}fYal^>T9z;8juH@?rG&g-t+R2dVDBe zq!K%{e-rT5jX19`(bP23LUN4+_zh2KD~EAYzhpEO3MUG8@}uBHH@4J zd`>_(K4q&>*k82(dDuC)X6JuPrBBubOg7qZ{?x!r@{%0);*`h*^F|%o?&1wX?Wr4b z1~&cy#PUuES{C#xJ84!z<1tp9sfrR(i%Tu^jnXy;4`Xk;AQCdFC@?V%|; zySdC7qS|uQRcH}EFZH%mMB~7gi}a0utE}ZE_}8PQH8f;H%PN41Cb9R%w5Oi5el^fd z$n{3SqLCnrF##x?4sa^r!O$7NX!}&}V;0ZGQ&K&i%6$3C_dR%I7%gdQ;KT6YZiQrW zk%q<74oVBV>@}CvJ4Wj!d^?#Zwq(b$E1ze4$99DuNg?6t9H}k_|D7KWD7i0-g*EO7 z;5{hSIYE4DMOK3H%|f5Edx+S0VI0Yw!tsaRS2&Il2)ea^8R5TG72BrJue|f_{2UHa z@w;^c|K3da#$TB0P3;MPlF7RuQeXT$ zS<<|C0OF(k)>fr&wOB=gP8!Qm>F41u;3esv7_0l%QHt(~+n; zf!G6%hp;Gfa9L9=AceiZs~tK+Tf*Wof=4!u{nIO90jH@iS0l+#%8=~%ASzFv7zqSB^?!@N7)kp0t&tCGLmzXSRMRyxCmCYUD2!B`? 
zhs$4%KO~m=VFk3Buv9osha{v+mAEq=ik3RdK@;WWTV_g&-$U4IM{1IhGX{pAu%Z&H zFfwCpUsX%RKg);B@7OUzZ{Hn{q6Vv!3#8fAg!P$IEx<0vAx;GU%}0{VIsmFBPq_mb zpe^BChDK>sc-WLKl<6 zwbW|e&d&dv9Wu0goueyu>(JyPx1mz0v4E?cJjFuKF71Q1)AL8jHO$!fYT3(;U3Re* zPPOe%*O+@JYt1bW`!W_1!mN&=w3G9ru1XsmwfS~BJ))PhD(+_J_^N6j)sx5VwbWK| zwRyC?W<`pOCY)b#AS?rluxuuGf-AJ=D!M36l{ua?@SJ5>e!IBr3CXIxWw5xUZ@Xrw z_R@%?{>d%Ld4p}nEsiA@v*nc6Ah!MUs?GA7e5Q5lPpp0@`%5xY$C;{%rz24$;vR#* zBP=a{)K#CwIY%p} zXVdxTQ^HS@O&~eIftU+Qt^~(DGxrdi3k}DdT^I7Iy5SMOp$QuD8s;+93YQ!OY{eB24%xY7ml@|M7I(Nb@K_-?F;2?et|CKkuZK_>+>Lvg!>JE~wN`BI|_h6$qi!P)+K-1Hh(1;a`os z55)4Q{oJiA(lQM#;w#Ta%T0jDNXIPM_bgESMCDEg6rM33anEr}=|Fn6)|jBP6Y}u{ zv9@%7*#RI9;fv;Yii5CI+KrRdr0DKh=L>)eO4q$1zmcSmglsV`*N(x=&Wx`*v!!hn6X-l0 zP_m;X??O(skcj+oS$cIdKhfT%ABAzz3w^la-Ucw?yBPEC+=Pe_vU8nd-HV5YX6X8r zZih&j^eLU=%*;VzhUyoLF;#8QsEfmByk+Y~caBqSvQaaWf2a{JKB9B>V&r?l^rXaC z8)6AdR@Qy_BxQrE2Fk?ewD!SwLuMj@&d_n5RZFf7=>O>hzVE*seW3U?_p|R^CfoY`?|#x9)-*yjv#lo&zP=uI`M?J zbzC<^3x7GfXA4{FZ72{PE*-mNHyy59Q;kYG@BB~NhTd6pm2Oj=_ zizmD?MKVRkT^KmXuhsk?eRQllPo2Ubk=uCKiZ&u3Xjj~<(!M94c)Tez@9M1Gfs5JV z->@II)CDJOXTtPrQudNjE}Eltbjq>6KiwAwqvAKd^|g!exgLG3;wP+#mZYr`cy3#39e653d=jrR-ulW|h#ddHu(m9mFoW~2yE zz5?dB%6vF}+`-&-W8vy^OCxm3_{02royjvmwjlp+eQDzFVEUiyO#gLv%QdDSI#3W* z?3!lL8clTaNo-DVJw@ynq?q!%6hTQi35&^>P85G$TqNt78%9_sSJt2RThO|JzM$iL zg|wjxdMC2|Icc5rX*qPL(coL!u>-xxz-rFiC!6hD1IR%|HSRsV3>Kq~&vJ=s3M5y8SG%YBQ|{^l#LGlg!D?E>2yR*eV%9m$_J6VGQ~AIh&P$_aFbh zULr0Z$QE!QpkP=aAeR4ny<#3Fwyw@rZf4?Ewq`;mCVv}xaz+3ni+}a=k~P+yaWt^L z@w67!DqVf7D%7XtXX5xBW;Co|HvQ8WR1k?r2cZD%U;2$bsM%u8{JUJ5Z0k= zZJARv^vFkmWx15CB=rb=D4${+#DVqy5$C%bf`!T0+epLJLnh1jwCdb*zuCL}eEFvE z{rO1%gxg>1!W(I!owu*mJZ0@6FM(?C+d*CeceZRW_4id*D9p5nzMY&{mWqrJomjIZ z97ZNnZ3_%Hx8dn;H>p8m7F#^2;T%yZ3H;a&N7tm=Lvs&lgJLW{V1@h&6Vy~!+Ffbb zv(n3+v)_D$}dqd!2>Y2B)#<+o}LH#%ogGi2-?xRIH)1!SD)u-L65B&bsJTC=LiaF+YOCif2dUX6uAA|#+vNR z>U+KQekVGon)Yi<93(d!(yw1h3&X0N(PxN2{%vn}cnV?rYw z$N^}_o!XUB!mckL`yO1rnUaI4wrOeQ(+&k?2mi47hzxSD`N#-byqd1IhEoh!PGq>t 
z_MRy{5B0eKY>;Ao3z$RUU7U+i?iX^&r739F)itdrTpAi-NN0=?^m%?{A9Ly2pVv>Lqs6moTP?T2-AHqFD-o_ znVr|7OAS#AEH}h8SRPQ@NGG47dO}l=t07__+iK8nHw^(AHx&Wb<%jPc$$jl6_p(b$ z)!pi(0fQodCHfM)KMEMUR&UID>}m^(!{C^U7sBDOA)$VThRCI0_+2=( zV8mMq0R(#z;C|7$m>$>`tX+T|xGt(+Y48@ZYu#z;0pCgYgmMVbFb!$?%yhZqP_nhn zy4<#3P1oQ#2b51NU1mGnHP$cf0j-YOgAA}A$QoL6JVLcmExs(kU{4z;PBHJD%_=0F z>+sQV`mzijSIT7xn%PiDKHOujX;n|M&qr1T@rOxTdxtZ!&u&3HHFLYD5$RLQ=heur zb>+AFokUVQeJy-#LP*^)spt{mb@Mqe=A~-4p0b+Bt|pZ+@CY+%x}9f}izU5;4&QFE zO1bhg&A4uC1)Zb67kuowWY4xbo&J=%yoXlFB)&$d*-}kjBu|w!^zbD1YPc0-#XTJr z)pm2RDy%J3jlqSMq|o%xGS$bPwn4AqitC6&e?pqWcjWPt{3I{>CBy;hg0Umh#c;hU3RhCUX=8aR>rmd` z7Orw(5tcM{|-^J?ZAA9KP|)X6n9$-kvr#j5YDecTM6n z&07(nD^qb8hpF0B^z^pQ*%5ePYkv&FabrlI61ntiVp!!C8y^}|<2xgAd#FY=8b*y( zuQOuvy2`Ii^`VBNJB&R!0{hABYX55ooCAJSSevl4RPqEGb)iy_0H}v@vFwFzD%>#I>)3PsouQ+_Kkbqy*kKdHdfkN7NBcq%V{x^fSxgXpg7$bF& zj!6AQbDY(1u#1_A#1UO9AxiZaCVN2F0wGXdY*g@x$ByvUA?ePdide0dmr#}udE%K| z3*k}Vv2Ew2u1FXBaVA6aerI36R&rzEZeDDCl5!t0J=ug6kuNZzH>3i_VN`%BsaVB3 zQYw|Xub_SGf{)F{$ZX5`Jc!X!;eybjP+o$I{Z^Hsj@D=E{MnnL+TbC@HEU2DjG{3-LDGIbq()U87x4eS;JXnSh;lRlJ z>EL3D>wHt-+wTjQF$fGyDO$>d+(fq@bPpLBS~xA~R=3JPbS{tzN(u~m#Po!?H;IYv zE;?8%^vle|%#oux(Lj!YzBKv+Fd}*Ur-dCBoX*t{KeNM*n~ZPYJ4NNKkI^MFbz9!v z4(Bvm*Kc!-$%VFEewYJKz-CQN{`2}KX4*CeJEs+Q(!kI%hN1!1P6iOq?ovz}X0IOi z)YfWpwW@pK08^69#wSyCZkX9?uZD?C^@rw^Y?gLS_xmFKkooyx$*^5#cPqntNTtSG zlP>XLMj2!VF^0k#ole7`-c~*~+_T5ls?x4)ah(j8vo_ zwb%S8qoaZqY0-$ZI+ViIA_1~~rAH7K_+yFS{0rT@eQtTAdz#8E5VpwnW!zJ_^{Utv zlW5Iar3V5t&H4D6A=>?mq;G92;1cg9a2sf;gY9pJDVKn$DYdQlvfXq}zz8#LyPGq@ z+`YUMD;^-6w&r-82JL7mA8&M~Pj@aK!m{0+^v<|t%APYf7`}jGEhdYLqsHW-Le9TL z_hZZ1gbrz7$f9^fAzVIP30^KIz!!#+DRLL+qMszvI_BpOSmjtl$hh;&UeM{ER@INV zcI}VbiVTPoN|iSna@=7XkP&-4#06C};8ajbxJ4Gcq8(vWv4*&X8bM^T$mBk75Q92j z1v&%a;OSKc8EIrodmIiw$lOES2hzGDcjjB`kEDfJe{r}yE6`eZL zEB`9u>Cl0IsQ+t}`-cx}{6jqcANucqIB>Qmga_&<+80E2Q|VHHQ$YlAt{6`Qu`HA3 z03s0-sSlwbvgi&_R8s={6<~M^pGvBNjKOa>tWenzS8s zR>L7R5aZ=mSU{f?ib4Grx$AeFvtO5N|D>9#)ChH#Fny2maHWHOf2G=#<9Myot#+4u 
zWVa6d^Vseq_0=#AYS(-m$Lp;*8nC_6jXIjEM`omUmtH@QDs3|G)i4j*#_?#UYVZvJ z?YjT-?!4Q{BNun;dKBWLEw2C-VeAz`%?A>p;)PL}TAZn5j~HK>v1W&anteARlE+~+ zj>c(F;?qO3pXBb|#OZdQnm<4xWmn~;DR5SDMxt0UK_F^&eD|KZ=O;tO3vy4@4h^;2 zUL~-z`-P1aOe?|ZC1BgVsL)2^J-&vIFI%q@40w0{jjEfeVl)i9(~bt2z#2Vm)p`V_ z1;6$Ae7=YXk#=Qkd24Y23t&GvRxaOoad~NbJ+6pxqzJ>FY#Td7@`N5xp!n(c!=RE& z&<<@^a$_Ys8jqz4|5Nk#FY$~|FPC0`*a5HH!|Gssa9=~66&xG9)|=pOOJ2KE5|YrR zw!w6K2aC=J$t?L-;}5hn6mHd%hC;p8P|Dgh6D>hGnXPgi;6r+eA=?f72y9(Cf_ho{ zH6#)uD&R=73^$$NE;5piWX2bzR67fQ)`b=85o0eOLGI4c-Tb@-KNi2pz=Ke@SDcPn za$AxXib84`!Sf;Z3B@TSo`Dz7GM5Kf(@PR>Ghzi=BBxK8wRp>YQoXm+iL>H*Jo9M3 z6w&E?BC8AFTFT&Tv8zf+m9<&S&%dIaZ)Aoqkak_$r-2{$d~0g2oLETx9Y`eOAf14QXEQw3tJne;fdzl@wV#TFXSLXM2428F-Q}t+n2g%vPRMUzYPvzQ9f# zu(liiJem9P*?0%V@RwA7F53r~|I!Ty)<*AsMX3J{_4&}{6pT%Tpw>)^|DJ)>gpS~1rNEh z0$D?uO8mG?H;2BwM5a*26^7YO$XjUm40XmBsb63MoR;bJh63J;OngS5sSI+o2HA;W zdZV#8pDpC9Oez&L8loZO)MClRz!_!WD&QRtQxnazhT%Vj6Wl4G11nUk8*vSeVab@N#oJ}`KyJv+8Mo@T1-pqZ1t|?cnaVOd;1(h9 z!$DrN=jcGsVYE-0-n?oCJ^4x)F}E;UaD-LZUIzcD?W^ficqJWM%QLy6QikrM1aKZC zi{?;oKwq^Vsr|&`i{jIphA8S6G4)$KGvpULjH%9u(Dq247;R#l&I0{IhcC|oBF*Al zvLo7Xte=C{aIt*otJD}BUq)|_pdR>{zBMT< z(^1RpZv*l*m*OV^8>9&asGBo8h*_4q*)-eCv*|Pq=XNGrZE)^(SF7^{QE_~4VDB(o zVcPA_!G+2CAtLbl+`=Q~9iW`4ZRLku!uB?;tWqVjB0lEOf}2RD7dJ=BExy=<9wkb- z9&7{XFA%n#JsHYN8t5d~=T~5DcW4$B%3M+nNvC2`0!#@sckqlzo5;hhGi(D9=*A4` z5ynobawSPRtWn&CDLEs3Xf`(8^zDP=NdF~F^s&={l7(aw&EG}KWpMjtmz7j_VLO;@ zM2NVLDxZ@GIv7*gzl1 zjq78tv*8#WSY`}Su0&C;2F$Ze(q>F(@Wm^Gw!)(j;dk9Ad{STaxn)IV9FZhm*n+U} zi;4y*3v%A`_c7a__DJ8D1b@dl0Std3F||4Wtvi)fCcBRh!X9$1x!_VzUh>*S5s!oq z;qd{J_r79EL2wIeiGAqFstWtkfIJpjVh%zFo*=55B9Zq~y0=^iqHWfQl@O!Ak;(o*m!pZqe9 z%U2oDOhR)BvW8&F70L;2TpkzIutIvNQaTjjs5V#8mV4!NQ}zN=i`i@WI1z0eN-iCS z;vL-Wxc^Vc_qK<5RPh(}*8dLT{~GzE{w2o$2kMFaEl&q zP{V=>&3kW7tWaK-Exy{~`v4J0U#OZBk{a9{&)&QG18L@6=bsZ1zC_d{{pKZ-Ey>I> z;8H0t4bwyQqgu4hmO`3|4K{R*5>qnQ&gOfdy?z`XD%e5+pTDzUt3`k^u~SaL&XMe= z9*h#kT(*Q9jO#w2Hd|Mr-%DV8i_1{J1MU~XJ3!WUplhXDYBpJH><0OU`**nIvPIof 
z|N8@I=wA)sf45SAvx||f?Z5uB$kz1qL3Ky_{%RPdP5iN-D2!p5scq}buuC00C@jom zhfGKm3|f?Z0iQ|K$Z~!`8{nmAS1r+fp6r#YDOS8V*;K&Gs7Lc&f^$RC66O|)28oh`NHy&vq zJh+hAw8+ybTB0@VhWN^0iiTnLsCWbS_y`^gs!LX!Lw{yE``!UVzrV24tP8o;I6-65 z1MUiHw^{bB15tmrVT*7-#sj6cs~z`wk52YQJ*TG{SE;KTm#Hf#a~|<(|ImHH17nNM z`Ub{+J3dMD!)mzC8b(2tZtokKW5pAwHa?NFiso~# z1*iaNh4lQ4TS)|@G)H4dZV@l*Vd;Rw;-;odDhW2&lJ%m@jz+Panv7LQm~2Js6rOW3 z0_&2cW^b^MYW3)@o;neZ<{B4c#m48dAl$GCc=$>ErDe|?y@z`$uq3xd(%aAsX)D%l z>y*SQ%My`yDP*zof|3@_w#cjaW_YW4BdA;#Glg1RQcJGY*CJ9`H{@|D+*e~*457kd z73p<%fB^PV!Ybw@)Dr%(ZJbX}xmCStCYv#K3O32ej{$9IzM^I{6FJ8!(=azt7RWf4 z7ib0UOPqN40X!wOnFOoddd8`!_IN~9O)#HRTyjfc#&MCZ zZAMzOVB=;qwt8gV?{Y2?b=iSZG~RF~uyx18K)IDFLl})G1v@$(s{O4@RJ%OTJyF+Cpcx4jmy|F3euCnMK!P2WTDu5j z{{gD$=M*pH!GGzL%P)V2*ROm>!$Y=z|D`!_yY6e7SU$~a5q8?hZGgaYqaiLnkK%?0 zs#oI%;zOxF@g*@(V4p!$7dS1rOr6GVs6uYCTt2h)eB4?(&w8{#o)s#%gN@BBosRUe z)@P@8_Zm89pr~)b>e{tbPC~&_MR--iB{=)y;INU5#)@Gix-YpgP<-c2Ms{9zuCX|3 z!p(?VaXww&(w&uBHzoT%!A2=3HAP>SDxcljrego7rY|%hxy3XlODWffO_%g|l+7Y_ zqV(xbu)s4lV=l7M;f>vJl{`6qBm>#ZeMA}kXb97Z)?R97EkoI?x6Lp0yu1Z>PS?2{ z0QQ(8D)|lc9CO3B~e(pQM&5(1y&y=e>C^X$`)_&XuaI!IgDTVqt31wX#n+@!a_A0ZQkA zCJ2@M_4Gb5MfCrm5UPggeyh)8 zO9?`B0J#rkoCx(R0I!ko_2?iO@|oRf1;3r+i)w-2&j?=;NVIdPFsB)`|IC0zk6r9c zRrkfxWsiJ(#8QndNJj@{@WP2Ackr|r1VxV{7S&rSU(^)-M8gV>@UzOLXu9K<{6e{T zXJ6b92r$!|lwjhmgqkdswY&}c)KW4A)-ac%sU;2^fvq7gfUW4Bw$b!i@duy1CAxSn z(pyh$^Z=&O-q<{bZUP+$U}=*#M9uVc>CQVgDs4swy5&8RAHZ~$)hrTF4W zPsSa~qYv_0mJnF89RnnJTH`3}w4?~epFl=D(35$ zWa07ON$`OMBOHgCmfO(9RFc<)?$x)N}Jd2A(<*Ll7+4jrRt9w zwGxExUXd9VB#I|DwfxvJ;HZ8Q{37^wDhaZ%O!oO(HpcqfLH%#a#!~;Jl7F5>EX_=8 z{()l2NqPz>La3qJR;_v+wlK>GsHl;uRA8%j`A|yH@k5r%55S9{*Cp%uw6t`qc1!*T za2OeqtQj7sAp#Q~=5Fs&aCR9v>5V+s&RdNvo&H~6FJOjvaj--2sYYBvMq;55%z8^o z|BJDA4vzfow#DO#ZQHh;Oq_{r+qP{R9ox2TOgwQiv7Ow!zjN+A@BN;0tA2lUb#+zO z(^b89eV)D7UVE+h{mcNc6&GtpOqDn_?VAQ)Vob$hlFwW%xh>D#wml{t&Ofmm_d_+; zKDxzdr}`n2Rw`DtyIjrG)eD0vut$}dJAZ0AohZ+ZQdWXn_Z@dI_y=7t3q8x#pDI-K z2VVc&EGq445Rq-j0=U=Zx`oBaBjsefY;%)Co>J3v4l8V(T8H?49_@;K6q#r~Wwppc 
z4XW0(4k}cP=5ex>-Xt3oATZ~bBWKv)aw|I|Lx=9C1s~&b77idz({&q3T(Y(KbWO?+ zmcZ6?WeUsGk6>km*~234YC+2e6Zxdl~<_g2J|IE`GH%n<%PRv-50; zH{tnVts*S5*_RxFT9eM0z-pksIb^drUq4>QSww=u;UFCv2AhOuXE*V4z?MM`|ABOC4P;OfhS(M{1|c%QZ=!%rQTDFx`+}?Kdx$&FU?Y<$x;j7z=(;Lyz+?EE>ov!8vvMtSzG!nMie zsBa9t8as#2nH}n8xzN%W%U$#MHNXmDUVr@GX{?(=yI=4vks|V)!-W5jHsU|h_&+kY zS_8^kd3jlYqOoiI`ZqBVY!(UfnAGny!FowZWY_@YR0z!nG7m{{)4OS$q&YDyw6vC$ zm4!$h>*|!2LbMbxS+VM6&DIrL*X4DeMO!@#EzMVfr)e4Tagn~AQHIU8?e61TuhcKD zr!F4(kEebk(Wdk-?4oXM(rJwanS>Jc%<>R(siF+>+5*CqJLecP_we33iTFTXr6W^G z7M?LPC-qFHK;E!fxCP)`8rkxZyFk{EV;G-|kwf4b$c1k0atD?85+|4V%YATWMG|?K zLyLrws36p%Qz6{}>7b>)$pe>mR+=IWuGrX{3ZPZXF3plvuv5Huax86}KX*lbPVr}L z{C#lDjdDeHr~?l|)Vp_}T|%$qF&q#U;ClHEPVuS+Jg~NjC1RP=17=aQKGOcJ6B3mp z8?4*-fAD~}sX*=E6!}^u8)+m2j<&FSW%pYr_d|p_{28DZ#Cz0@NF=gC-o$MY?8Ca8 zr5Y8DSR^*urS~rhpX^05r30Ik#2>*dIOGxRm0#0YX@YQ%Mg5b6dXlS!4{7O_kdaW8PFSdj1=ryI-=5$fiieGK{LZ+SX(1b=MNL!q#lN zv98?fqqTUH8r8C7v(cx#BQ5P9W>- zmW93;eH6T`vuJ~rqtIBg%A6>q>gnWb3X!r0wh_q;211+Om&?nvYzL1hhtjB zK_7G3!n7PL>d!kj){HQE zE8(%J%dWLh1_k%gVXTZt zEdT09XSKAx27Ncaq|(vzL3gm83q>6CAw<$fTnMU05*xAe&rDfCiu`u^1)CD<>sx0i z*hr^N_TeN89G(nunZoLBf^81#pmM}>JgD@Nn1l*lN#a=B=9pN%tmvYFjFIoKe_(GF z-26x{(KXdfsQL7Uv6UtDuYwV`;8V3w>oT_I<`Ccz3QqK9tYT5ZQzbop{=I=!pMOCb zCU68`n?^DT%^&m>A%+-~#lvF!7`L7a{z<3JqIlk1$<||_J}vW1U9Y&eX<}l8##6i( zZcTT@2`9(Mecptm@{3A_Y(X`w9K0EwtPq~O!16bq{7c0f7#(3wn-^)h zxV&M~iiF!{-6A@>o;$RzQ5A50kxXYj!tcgme=Qjrbje~;5X2xryU;vH|6bE(8z^<7 zQ>BG7_c*JG8~K7Oe68i#0~C$v?-t@~@r3t2inUnLT(c=URpA9kA8uq9PKU(Ps(LVH zqgcqW>Gm?6oV#AldDPKVRcEyQIdTT`Qa1j~vS{<;SwyTdr&3*t?J)y=M7q*CzucZ&B0M=joT zBbj@*SY;o2^_h*>R0e({!QHF0=)0hOj^B^d*m>SnRrwq>MolNSgl^~r8GR#mDWGYEIJA8B<|{{j?-7p zVnV$zancW3&JVDtVpIlI|5djKq0(w$KxEFzEiiL=h5Jw~4Le23@s(mYyXWL9SX6Ot zmb)sZaly_P%BeX_9 zw&{yBef8tFm+%=--m*J|o~+Xg3N+$IH)t)=fqD+|fEk4AAZ&!wcN5=mi~Vvo^i`}> z#_3ahR}Ju)(Px7kev#JGcSwPXJ2id9%Qd2A#Uc@t8~egZ8;iC{e! 
z%=CGJOD1}j!HW_sgbi_8suYnn4#Ou}%9u)dXd3huFIb!ytlX>Denx@pCS-Nj$`VO&j@(z!kKSP0hE4;YIP#w9ta=3DO$7f*x zc9M4&NK%IrVmZAe=r@skWD`AEWH=g+r|*13Ss$+{c_R!b?>?UaGXlw*8qDmY#xlR= z<0XFbs2t?8i^G~m?b|!Hal^ZjRjt<@a? z%({Gn14b4-a|#uY^=@iiKH+k?~~wTj5K1A&hU z2^9-HTC)7zpoWK|$JXaBL6C z#qSNYtY>65T@Zs&-0cHeu|RX(Pxz6vTITdzJdYippF zC-EB+n4}#lM7`2Ry~SO>FxhKboIAF#Z{1wqxaCb{#yEFhLuX;Rx(Lz%T`Xo1+a2M}7D+@wol2)OJs$TwtRNJ={( zD@#zTUEE}#Fz#&(EoD|SV#bayvr&E0vzmb%H?o~46|FAcx?r4$N z&67W3mdip-T1RIxwSm_&(%U|+WvtGBj*}t69XVd&ebn>KOuL(7Y8cV?THd-(+9>G7*Nt%T zcH;`p={`SOjaf7hNd(=37Lz3-51;58JffzIPgGs_7xIOsB5p2t&@v1mKS$2D$*GQ6 zM(IR*j4{nri7NMK9xlDy-hJW6sW|ZiDRaFiayj%;(%51DN!ZCCCXz+0Vm#};70nOx zJ#yA0P3p^1DED;jGdPbQWo0WATN=&2(QybbVdhd=Vq*liDk`c7iZ?*AKEYC#SY&2g z&Q(Ci)MJ{mEat$ZdSwTjf6h~roanYh2?9j$CF@4hjj_f35kTKuGHvIs9}Re@iKMxS-OI*`0S z6s)fOtz}O$T?PLFVSeOjSO26$@u`e<>k(OSP!&YstH3ANh>)mzmKGNOwOawq-MPXe zy4xbeUAl6tamnx))-`Gi2uV5>9n(73yS)Ukma4*7fI8PaEwa)dWHs6QA6>$}7?(L8 ztN8M}?{Tf!Zu22J5?2@95&rQ|F7=FK-hihT-vDp!5JCcWrVogEnp;CHenAZ)+E+K5 z$Cffk5sNwD_?4+ymgcHR(5xgt20Z8M`2*;MzOM#>yhk{r3x=EyM226wb&!+j`W<%* zSc&|`8!>dn9D@!pYow~(DsY_naSx7(Z4i>cu#hA5=;IuI88}7f%)bRkuY2B;+9Uep zpXcvFWkJ!mQai63BgNXG26$5kyhZ2&*3Q_tk)Ii4M>@p~_~q_cE!|^A;_MHB;7s#9 zKzMzK{lIxotjc};k67^Xsl-gS!^*m*m6kn|sbdun`O?dUkJ{0cmI0-_2y=lTAfn*Y zKg*A-2sJq)CCJgY0LF-VQvl&6HIXZyxo2#!O&6fOhbHXC?%1cMc6y^*dOS{f$=137Ds1m01qs`>iUQ49JijsaQ( zksqV9@&?il$|4Ua%4!O15>Zy&%gBY&wgqB>XA3!EldQ%1CRSM(pp#k~-pkcCg4LAT zXE=puHbgsw)!xtc@P4r~Z}nTF=D2~j(6D%gTBw$(`Fc=OOQ0kiW$_RDd=hcO0t97h zb86S5r=>(@VGy1&#S$Kg_H@7G^;8Ue)X5Y+IWUi`o;mpvoV)`fcVk4FpcT|;EG!;? 
zHG^zrVVZOm>1KFaHlaogcWj(v!S)O(Aa|Vo?S|P z5|6b{qkH(USa*Z7-y_Uvty_Z1|B{rTS^qmEMLEYUSk03_Fg&!O3BMo{b^*`3SHvl0 zhnLTe^_vVIdcSHe)SQE}r~2dq)VZJ!aSKR?RS<(9lzkYo&dQ?mubnWmgMM37Nudwo z3Vz@R{=m2gENUE3V4NbIzAA$H1z0pagz94-PTJyX{b$yndsdKptmlKQKaaHj@3=ED zc7L?p@%ui|RegVYutK$64q4pe9+5sv34QUpo)u{1ci?)_7gXQd{PL>b0l(LI#rJmN zGuO+%GO`xneFOOr4EU(Wg}_%bhzUf;d@TU+V*2#}!2OLwg~%D;1FAu=Un>OgjPb3S z7l(riiCwgghC=Lm5hWGf5NdGp#01xQ59`HJcLXbUR3&n%P(+W2q$h2Qd z*6+-QXJ*&Kvk9ht0f0*rO_|FMBALen{j7T1l%=Q>gf#kma zQlg#I9+HB+z*5BMxdesMND`_W;q5|FaEURFk|~&{@qY32N$G$2B=&Po{=!)x5b!#n zxLzblkq{yj05#O7(GRuT39(06FJlalyv<#K4m}+vs>9@q-&31@1(QBv82{}Zkns~K ze{eHC_RDX0#^A*JQTwF`a=IkE6Ze@j#-8Q`tTT?k9`^ZhA~3eCZJ-Jr{~7Cx;H4A3 zcZ+Zj{mzFZbVvQ6U~n>$U2ZotGsERZ@}VKrgGh0xM;Jzt29%TX6_&CWzg+YYMozrM z`nutuS)_0dCM8UVaKRj804J4i%z2BA_8A4OJRQ$N(P9Mfn-gF;4#q788C@9XR0O3< zsoS4wIoyt046d+LnSCJOy@B@Uz*#GGd#+Ln1ek5Dv>(ZtD@tgZlPnZZJGBLr^JK+!$$?A_fA3LOrkoDRH&l7 zcMcD$Hsjko3`-{bn)jPL6E9Ds{WskMrivsUu5apD z?grQO@W7i5+%X&E&p|RBaEZ(sGLR@~(y^BI@lDMot^Ll?!`90KT!JXUhYS`ZgX3jnu@Ja^seA*M5R@f`=`ynQV4rc$uT1mvE?@tz)TN<=&H1%Z?5yjxcpO+6y_R z6EPuPKM5uxKpmZfT(WKjRRNHs@ib)F5WAP7QCADvmCSD#hPz$V10wiD&{NXyEwx5S z6NE`3z!IS^$s7m}PCwQutVQ#~w+V z=+~->DI*bR2j0^@dMr9`p>q^Ny~NrAVxrJtX2DUveic5vM%#N*XO|?YAWwNI$Q)_) zvE|L(L1jP@F%gOGtnlXtIv2&1i8q<)Xfz8O3G^Ea~e*HJsQgBxWL(yuLY+jqUK zRE~`-zklrGog(X}$9@ZVUw!8*=l`6mzYLtsg`AvBYz(cxmAhr^j0~(rzXdiOEeu_p zE$sf2(w(BPAvO5DlaN&uQ$4@p-b?fRs}d7&2UQ4Fh?1Hzu*YVjcndqJLw0#q@fR4u zJCJ}>_7-|QbvOfylj+e^_L`5Ep9gqd>XI3-O?Wp z-gt*P29f$Tx(mtS`0d05nHH=gm~Po_^OxxUwV294BDKT>PHVlC5bndncxGR!n(OOm znsNt@Q&N{TLrmsoKFw0&_M9$&+C24`sIXGWgQaz=kY;S{?w`z^Q0JXXBKFLj0w0U6P*+jPKyZHX9F#b0D1$&(- zrm8PJd?+SrVf^JlfTM^qGDK&-p2Kdfg?f>^%>1n8bu&byH(huaocL>l@f%c*QkX2i znl}VZ4R1en4S&Bcqw?$=Zi7ohqB$Jw9x`aM#>pHc0x z0$!q7iFu zZ`tryM70qBI6JWWTF9EjgG@>6SRzsd}3h+4D8d~@CR07P$LJ}MFsYi-*O%XVvD@yT|rJ+Mk zDllJ7$n0V&A!0flbOf)HE6P_afPWZmbhpliqJuw=-h+r;WGk|ntkWN(8tKlYpq5Ow z(@%s>IN8nHRaYb*^d;M(D$zGCv5C|uqmsDjwy4g=Lz>*OhO3z=)VD}C<65;`89Ye} 
zSCxrv#ILzIpEx1KdLPlM&%Cctf@FqTKvNPXC&`*H9=l=D3r!GLM?UV zOxa(8ZsB`&+76S-_xuj?G#wXBfDY@Z_tMpXJS7^mp z@YX&u0jYw2A+Z+bD#6sgVK5ZgdPSJV3>{K^4~%HV?rn~4D)*2H!67Y>0aOmzup`{D zzDp3c9yEbGCY$U<8biJ_gB*`jluz1ShUd!QUIQJ$*1;MXCMApJ^m*Fiv88RZ zFopLViw}{$Tyhh_{MLGIE2~sZ)t0VvoW%=8qKZ>h=adTe3QM$&$PO2lfqH@brt!9j ziePM8$!CgE9iz6B<6_wyTQj?qYa;eC^{x_0wuwV~W+^fZmFco-o%wsKSnjXFEx02V zF5C2t)T6Gw$Kf^_c;Ei3G~uC8SM-xyycmXyC2hAVi-IfXqhu$$-C=*|X?R0~hu z8`J6TdgflslhrmDZq1f?GXF7*ALeMmOEpRDg(s*H`4>_NAr`2uqF;k;JQ+8>A|_6ZNsNLECC%NNEb1Y1dP zbIEmNpK)#XagtL4R6BC{C5T(+=yA-(Z|Ap}U-AfZM#gwVpus3(gPn}Q$CExObJ5AC z)ff9Yk?wZ}dZ-^)?cbb9Fw#EjqQ8jxF4G3=L?Ra zg_)0QDMV1y^A^>HRI$x?Op@t;oj&H@1xt4SZ9(kifQ zb59B*`M99Td7@aZ3UWvj1rD0sE)d=BsBuW*KwkCds7ay(7*01_+L}b~7)VHI>F_!{ zyxg-&nCO?v#KOUec0{OOKy+sjWA;8rTE|Lv6I9H?CI?H(mUm8VXGwU$49LGpz&{nQp2}dinE1@lZ1iox6{ghN&v^GZv9J${7WaXj)<0S4g_uiJ&JCZ zr8-hsu`U%N;+9N^@&Q0^kVPB3)wY(rr}p7{p0qFHb3NUUHJb672+wRZs`gd1UjKPX z4o6zljKKA+Kkj?H>Ew63o%QjyBk&1!P22;MkD>sM0=z_s-G{mTixJCT9@_|*(p^bz zJ8?ZZ&;pzV+7#6Mn`_U-)k8Pjg?a;|Oe^us^PoPY$Va~yi8|?+&=y$f+lABT<*pZr zP}D{~Pq1Qyni+@|aP;ixO~mbEW9#c0OU#YbDZIaw=_&$K%Ep2f%hO^&P67hApZe`x zv8b`Mz@?M_7-)b!lkQKk)JXXUuT|B8kJlvqRmRpxtQDgvrHMXC1B$M@Y%Me!BSx3P z#2Eawl$HleZhhTS6Txm>lN_+I`>eV$&v9fOg)%zVn3O5mI*lAl>QcHuW6!Kixmq`X zBCZ*Ck6OYtDiK!N47>jxI&O2a9x7M|i^IagRr-fmrmikEQGgw%J7bO|)*$2FW95O4 zeBs>KR)izRG1gRVL;F*sr8A}aRHO0gc$$j&ds8CIO1=Gwq1%_~E)CWNn9pCtBE}+`Jelk4{>S)M)`Ll=!~gnn1yq^EX(+y*ik@3Ou0qU`IgYi3*doM+5&dU!cho$pZ zn%lhKeZkS72P?Cf68<#kll_6OAO26bIbueZx**j6o;I0cS^XiL`y+>{cD}gd%lux} z)3N>MaE24WBZ}s0ApfdM;5J_Ny}rfUyxfkC``Awo2#sgLnGPewK};dORuT?@I6(5~ z?kE)Qh$L&fwJXzK){iYx!l5$Tt|^D~MkGZPA}(o6f7w~O2G6Vvzdo*a;iXzk$B66$ zwF#;wM7A+(;uFG4+UAY(2`*3XXx|V$K8AYu#ECJYSl@S=uZW$ksfC$~qrrbQj4??z-)uz0QL}>k^?fPnJTPw% zGz)~?B4}u0CzOf@l^um}HZzbaIwPmb<)< zi_3@E9lc)Qe2_`*Z^HH;1CXOceL=CHpHS{HySy3T%<^NrWQ}G0i4e1xm_K3(+~oi$ zoHl9wzb?Z4j#90DtURtjtgvi7uw8DzHYmtPb;?%8vb9n@bszT=1qr)V_>R%s!92_` zfnHQPANx z<#hIjIMm#*(v*!OXtF+w8kLu`o?VZ5k7{`vw{Yc^qYclpUGIM_PBN1+c{#Vxv&E*@ 
zxg=W2W~JuV{IuRYw3>LSI1)a!thID@R=bU+cU@DbR^_SXY`MC7HOsCN z!dO4OKV7(E_Z8T#8MA1H`99?Z!r0)qKW_#|29X3#Jb+5+>qUidbeP1NJ@)(qi2S-X zao|f0_tl(O+$R|Qwd$H{_ig|~I1fbp_$NkI!0E;Y z6JrnU{1Ra6^on{9gUUB0mwzP3S%B#h0fjo>JvV~#+X0P~JV=IG=yHG$O+p5O3NUgG zEQ}z6BTp^Fie)Sg<){Z&I8NwPR(=mO4joTLHkJ>|Tnk23E(Bo`FSbPc05lF2-+)X? z6vV3*m~IBHTy*^E!<0nA(tCOJW2G4DsH7)BxLV8kICn5lu6@U*R`w)o9;Ro$i8=Q^V%uH8n3q=+Yf;SFRZu z!+F&PKcH#8cG?aSK_Tl@K9P#8o+jry@gdexz&d(Q=47<7nw@e@FFfIRNL9^)1i@;A z28+$Z#rjv-wj#heI|<&J_DiJ*s}xd-f!{J8jfqOHE`TiHHZVIA8CjkNQ_u;Ery^^t zl1I75&u^`1_q)crO+JT4rx|z2ToSC>)Or@-D zy3S>jW*sNIZR-EBsfyaJ+Jq4BQE4?SePtD2+jY8*%FsSLZ9MY>+wk?}}}AFAw)vr{ml)8LUG-y9>^t!{~|sgpxYc0Gnkg`&~R z-pilJZjr@y5$>B=VMdZ73svct%##v%wdX~9fz6i3Q-zOKJ9wso+h?VME7}SjL=!NUG{J?M&i!>ma`eoEa@IX`5G>B1(7;%}M*%-# zfhJ(W{y;>MRz!Ic8=S}VaBKqh;~7KdnGEHxcL$kA-6E~=!hrN*zw9N+_=odt<$_H_8dbo;0=42wcAETPCVGUr~v(`Uai zb{=D!Qc!dOEU6v)2eHSZq%5iqK?B(JlCq%T6av$Cb4Rko6onlG&?CqaX7Y_C_cOC3 zYZ;_oI(}=>_07}Oep&Ws7x7-R)cc8zfe!SYxJYP``pi$FDS)4Fvw5HH=FiU6xfVqIM!hJ;Rx8c0cB7~aPtNH(Nmm5Vh{ibAoU#J6 zImRCr?(iyu_4W_6AWo3*vxTPUw@vPwy@E0`(>1Qi=%>5eSIrp^`` zK*Y?fK_6F1W>-7UsB)RPC4>>Ps9)f+^MqM}8AUm@tZ->j%&h1M8s*s!LX5&WxQcAh z8mciQej@RPm?660%>{_D+7er>%zX_{s|$Z+;G7_sfNfBgY(zLB4Ey}J9F>zX#K0f6 z?dVNIeEh?EIShmP6>M+d|0wMM85Sa4diw1hrg|ITJ}JDg@o8y>(rF9mXk5M z2@D|NA)-7>wD&wF;S_$KS=eE84`BGw3g0?6wGxu8ys4rwI?9U=*^VF22t3%mbGeOh z`!O-OpF7#Vceu~F`${bW0nYVU9ecmk31V{tF%iv&5hWofC>I~cqAt@u6|R+|HLMMX zVxuSlMFOK_EQ86#E8&KwxIr8S9tj_goWtLv4f@!&h8;Ov41{J~496vp9vX=(LK#j! 
zAwi*21RAV-LD>9Cw3bV_9X(X3)Kr0-UaB*7Y>t82EQ%!)(&(XuAYtTsYy-dz+w=$ir)VJpe!_$ z6SGpX^i(af3{o=VlFPC);|J8#(=_8#vdxDe|Cok+ANhYwbE*FO`Su2m1~w+&9<_9~ z-|tTU_ACGN`~CNW5WYYBn^B#SwZ(t4%3aPp z;o)|L6Rk569KGxFLUPx@!6OOa+5OjQLK5w&nAmwxkC5rZ|m&HT8G%GVZxB_@ME z>>{rnXUqyiJrT(8GMj_ap#yN_!9-lO5e8mR3cJiK3NE{_UM&=*vIU`YkiL$1%kf+1 z4=jk@7EEj`u(jy$HnzE33ZVW_J4bj}K;vT?T91YlO(|Y0FU4r+VdbmQ97%(J5 zkK*Bed8+C}FcZ@HIgdCMioV%A<*4pw_n}l*{Cr4}a(lq|injK#O?$tyvyE`S%(1`H z_wwRvk#13ElkZvij2MFGOj`fhy?nC^8`Zyo%yVcUAfEr8x&J#A{|moUBAV_^f$hpaUuyQeY3da^ zS9iRgf87YBwfe}>BO+T&Fl%rfpZh#+AM?Dq-k$Bq`vG6G_b4z%Kbd&v>qFjow*mBl z-OylnqOpLg}or7_VNwRg2za3VBK6FUfFX{|TD z`Wt0Vm2H$vdlRWYQJqDmM?JUbVqL*ZQY|5&sY*?!&%P8qhA~5+Af<{MaGo(dl&C5t zE%t!J0 zh6jqANt4ABdPxSTrVV}fLsRQal*)l&_*rFq(Ez}ClEH6LHv{J#v?+H-BZ2)Wy{K@9 z+ovXHq~DiDvm>O~r$LJo!cOuwL+Oa--6;UFE2q@g3N8Qkw5E>ytz^(&($!O47+i~$ zKM+tkAd-RbmP{s_rh+ugTD;lriL~`Xwkad#;_aM?nQ7L_muEFI}U_4$phjvYgleK~`Fo`;GiC07&Hq1F<%p;9Q;tv5b?*QnR%8DYJH3P>Svmv47Y>*LPZJy8_{9H`g6kQpyZU{oJ`m%&p~D=K#KpfoJ@ zn-3cqmHsdtN!f?~w+(t+I`*7GQA#EQC^lUA9(i6=i1PqSAc|ha91I%X&nXzjYaM{8$s&wEx@aVkQ6M{E2 zfzId#&r(XwUNtPcq4Ngze^+XaJA1EK-%&C9j>^9(secqe{}z>hR5CFNveMsVA)m#S zk)_%SidkY-XmMWlVnQ(mNJ>)ooszQ#vaK;!rPmGKXV7am^_F!Lz>;~{VrIO$;!#30XRhE1QqO_~#+Ux;B_D{Nk=grn z8Y0oR^4RqtcYM)7a%@B(XdbZCOqnX#fD{BQTeLvRHd(irHKq=4*jq34`6@VAQR8WG z^%)@5CXnD_T#f%@-l${>y$tfb>2LPmc{~5A82|16mH)R?&r#KKLs7xpN-D`=&Cm^R zvMA6#Ahr<3X>Q7|-qfTY)}32HkAz$_mibYV!I)u>bmjK`qwBe(>za^0Kt*HnFbSdO z1>+ryKCNxmm^)*$XfiDOF2|{-v3KKB?&!(S_Y=Ht@|ir^hLd978xuI&N{k>?(*f8H z=ClxVJK_%_z1TH0eUwm2J+2To7FK4o+n_na)&#VLn1m;!+CX+~WC+qg1?PA~KdOlC zW)C@pw75_xoe=w7i|r9KGIvQ$+3K?L{7TGHwrQM{dCp=Z*D}3kX7E-@sZnup!BImw z*T#a=+WcTwL78exTgBn|iNE3#EsOorO z*kt)gDzHiPt07fmisA2LWN?AymkdqTgr?=loT7z@d`wnlr6oN}@o|&JX!yPzC*Y8d zu6kWlTzE1)ckyBn+0Y^HMN+GA$wUO_LN6W>mxCo!0?oiQvT`z$jbSEu&{UHRU0E8# z%B^wOc@S!yhMT49Y)ww(Xta^8pmPCe@eI5C*ed96)AX9<>))nKx0(sci8gwob_1}4 z0DIL&vsJ1_s%<@y%U*-eX z5rN&(zef-5G~?@r79oZGW1d!WaTqQn0F6RIOa9tJ=0(kdd{d1{<*tHT#cCvl*i>YY 
zH+L7jq8xZNcTUBqj(S)ztTU!TM!RQ}In*n&Gn<>(60G7}4%WQL!o>hbJqNDSGwl#H z`4k+twp0cj%PsS+NKaxslAEu9!#U3xT1|_KB6`h=PI0SW`P9GTa7caD1}vKEglV8# zjKZR`pluCW19c2fM&ZG)c3T3Um;ir3y(tSCJ7Agl6|b524dy5El{^EQBG?E61H0XY z`bqg!;zhGhyMFl&(o=JWEJ8n~z)xI}A@C0d2hQGvw7nGv)?POU@(kS1m=%`|+^ika zXl8zjS?xqW$WlO?Ewa;vF~XbybHBor$f<%I&*t$F5fynwZlTGj|IjZtVfGa7l&tK} zW>I<69w(cZLu)QIVG|M2xzW@S+70NinQzk&Y0+3WT*cC)rx~04O-^<{JohU_&HL5XdUKW!uFy|i$FB|EMu0eUyW;gsf`XfIc!Z0V zeK&*hPL}f_cX=@iv>K%S5kL;cl_$v?n(Q9f_cChk8Lq$glT|=e+T*8O4H2n<=NGmn z+2*h+v;kBvF>}&0RDS>)B{1!_*XuE8A$Y=G8w^qGMtfudDBsD5>T5SB;Qo}fSkkiV ze^K^M(UthkwrD!&*tTsu>Dacdj_q`~V%r_twr$(Ct&_dKeeXE?fA&4&yASJWJ*}~- zel=@W)tusynfC_YqH4ll>4Eg`Xjs5F7Tj>tTLz<0N3)X<1px_d2yUY>X~y>>93*$) z5PuNMQLf9Bu?AAGO~a_|J2akO1M*@VYN^VxvP0F$2>;Zb9;d5Yfd8P%oFCCoZE$ z4#N$^J8rxYjUE_6{T%Y>MmWfHgScpuGv59#4u6fpTF%~KB^Ae`t1TD_^Ud#DhL+Dm zbY^VAM#MrAmFj{3-BpVSWph2b_Y6gCnCAombVa|1S@DU)2r9W<> zT5L8BB^er3zxKt1v(y&OYk!^aoQisqU zH(g@_o)D~BufUXcPt!Ydom)e|aW{XiMnes2z&rE?og>7|G+tp7&^;q?Qz5S5^yd$i z8lWr4g5nctBHtigX%0%XzIAB8U|T6&JsC4&^hZBw^*aIcuNO47de?|pGXJ4t}BB`L^d8tD`H`i zqrP8?#J@8T#;{^B!KO6J=@OWKhAerih(phML`(Rg7N1XWf1TN>=Z3Do{l_!d~DND&)O)D>ta20}@Lt77qSnVsA7>)uZAaT9bsB>u&aUQl+7GiY2|dAEg@%Al3i316y;&IhQL^8fw_nwS>f60M_-m+!5)S_6EPM7Y)(Nq^8gL7(3 zOiot`6Wy6%vw~a_H?1hLVzIT^i1;HedHgW9-P#)}Y6vF%C=P70X0Tk^z9Te@kPILI z_(gk!k+0%CG)%!WnBjjw*kAKs_lf#=5HXC00s-}oM-Q1aXYLj)(1d!_a7 z*Gg4Fe6F$*ujVjI|79Z5+Pr`us%zW@ln++2l+0hsngv<{mJ%?OfSo_3HJXOCys{Ug z00*YR-(fv<=&%Q!j%b-_ppA$JsTm^_L4x`$k{VpfLI(FMCap%LFAyq;#ns5bR7V+x zO!o;c5y~DyBPqdVQX)8G^G&jWkBy2|oWTw>)?5u}SAsI$RjT#)lTV&Rf8;>u*qXnb z8F%Xb=7#$m)83z%`E;49)t3fHInhtc#kx4wSLLms!*~Z$V?bTyUGiS&m>1P(952(H zuHdv=;o*{;5#X-uAyon`hP}d#U{uDlV?W?_5UjJvf%11hKwe&(&9_~{W)*y1nR5f_ z!N(R74nNK`y8>B!0Bt_Vr!;nc3W>~RiKtGSBkNlsR#-t^&;$W#)f9tTlZz>n*+Fjz z3zXZ;jf(sTM(oDzJt4FJS*8c&;PLTW(IQDFs_5QPy+7yhi1syPCarvqrHFcf&yTy)^O<1EBx;Ir`5W{TIM>{8w&PB>ro4;YD<5LF^TjTb0!zAP|QijA+1Vg>{Afv^% zmrkc4o6rvBI;Q8rj4*=AZacy*n8B{&G3VJc)so4$XUoie0)vr;qzPZVbb<#Fc=j+8CGBWe$n|3K& 
z_@%?{l|TzKSlUEO{U{{%Fz_pVDxs7i9H#bnbCw7@4DR=}r_qV!Zo~CvD4ZI*+j3kO zW6_=|S`)(*gM0Z;;}nj`73OigF4p6_NPZQ-Od~e$c_);;4-7sR>+2u$6m$Gf%T{aq zle>e3(*Rt(TPD}03n5)!Ca8Pu!V}m6v0o1;5<1h$*|7z|^(3$Y&;KHKTT}hV056wuF0Xo@mK-52~r=6^SI1NC%c~CC?n>yX6wPTgiWYVz!Sx^atLby9YNn1Rk{g?|pJaxD4|9cUf|V1_I*w zzxK)hRh9%zOl=*$?XUjly5z8?jPMy%vEN)f%T*|WO|bp5NWv@B(K3D6LMl!-6dQg0 zXNE&O>Oyf%K@`ngCvbGPR>HRg5!1IV$_}m@3dWB7x3t&KFyOJn9pxRXCAzFr&%37wXG;z^xaO$ekR=LJG ztIHpY8F5xBP{mtQidqNRoz= z@){+N3(VO5bD+VrmS^YjG@+JO{EOIW)9=F4v_$Ed8rZtHvjpiEp{r^c4F6Ic#ChlC zJX^DtSK+v(YdCW)^EFcs=XP7S>Y!4=xgmv>{S$~@h=xW-G4FF9?I@zYN$e5oF9g$# zb!eVU#J+NjLyX;yb)%SY)xJdvGhsnE*JEkuOVo^k5PyS=o#vq!KD46UTW_%R=Y&0G zFj6bV{`Y6)YoKgqnir2&+sl+i6foAn-**Zd1{_;Zb7Ki=u394C5J{l^H@XN`_6XTKY%X1AgQM6KycJ+= zYO=&t#5oSKB^pYhNdzPgH~aEGW2=ec1O#s-KG z71}LOg@4UEFtp3GY1PBemXpNs6UK-ax*)#$J^pC_me;Z$Je(OqLoh|ZrW*mAMBFn< zHttjwC&fkVfMnQeen8`Rvy^$pNRFVaiEN4Pih*Y3@jo!T0nsClN)pdrr9AYLcZxZ| zJ5Wlj+4q~($hbtuY zVQ7hl>4-+@6g1i`1a)rvtp-;b0>^`Dloy(#{z~ytgv=j4q^Kl}wD>K_Y!l~ zp(_&7sh`vfO(1*MO!B%<6E_bx1)&s+Ae`O)a|X=J9y~XDa@UB`m)`tSG4AUhoM=5& znWoHlA-(z@3n0=l{E)R-p8sB9XkV zZ#D8wietfHL?J5X0%&fGg@MH~(rNS2`GHS4xTo7L$>TPme+Is~!|79=^}QbPF>m%J zFMkGzSndiPO|E~hrhCeo@&Ea{M(ieIgRWMf)E}qeTxT8Q#g-!Lu*x$v8W^M^>?-g= zwMJ$dThI|~M06rG$Sv@C@tWR>_YgaG&!BAbkGggVQa#KdtDB)lMLNVLN|51C@F^y8 zCRvMB^{GO@j=cHfmy}_pCGbP%xb{pNN>? 
z?7tBz$1^zVaP|uaatYaIN+#xEN4jBzwZ|YI_)p(4CUAz1ZEbDk>J~Y|63SZaak~#0 zoYKruYsWHoOlC1(MhTnsdUOwQfz5p6-D0}4;DO$B;7#M{3lSE^jnTT;ns`>!G%i*F?@pR1JO{QTuD0U+~SlZxcc8~>IB{)@8p`P&+nDxNj`*gh|u?yrv$phpQcW)Us)bi`kT%qLj(fi{dWRZ%Es2!=3mI~UxiW0$-v3vUl?#g{p6eF zMEUAqo5-L0Ar(s{VlR9g=j7+lt!gP!UN2ICMokAZ5(Agd>})#gkA2w|5+<%-CuEP# zqgcM}u@3(QIC^Gx<2dbLj?cFSws_f3e%f4jeR?4M^M3cx1f+Qr6ydQ>n)kz1s##2w zk}UyQc+Z5G-d-1}{WzjkLXgS-2P7auWSJ%pSnD|Uivj5u!xk0 z_^-N9r9o;(rFDt~q1PvE#iJZ_f>J3gcP$)SOqhE~pD2|$=GvpL^d!r z6u=sp-CrMoF7;)}Zd7XO4XihC4ji?>V&(t^?@3Q&t9Mx=qex6C9d%{FE6dvU6%d94 zIE;hJ1J)cCqjv?F``7I*6bc#X)JW2b4f$L^>j{*$R`%5VHFi*+Q$2;nyieduE}qdS{L8y8F08yLs?w}{>8>$3236T-VMh@B zq-nujsb_1aUv_7g#)*rf9h%sFj*^mIcImRV*k~Vmw;%;YH(&ylYpy!&UjUVqqtfG` zox3esju?`unJJA_zKXRJP)rA3nXc$m^{S&-p|v|-0x9LHJm;XIww7C#R$?00l&Yyj z=e}gKUOpsImwW?N)+E(awoF@HyP^EhL+GlNB#k?R<2>95hz!h9sF@U20DHSB3~WMa zk90+858r@-+vWwkawJ)8ougd(i#1m3GLN{iSTylYz$brAsP%=&m$mQQrH$g%3-^VR zE%B`Vi&m8f3T~&myTEK28BDWCVzfWir1I?03;pX))|kY5ClO^+bae z*7E?g=3g7EiisYOrE+lA)2?Ln6q2*HLNpZEWMB|O-JI_oaHZB%CvYB(%=tU= zE*OY%QY58fW#RG5=gm0NR#iMB=EuNF@)%oZJ}nmm=tsJ?eGjia{e{yuU0l3{d^D@)kVDt=1PE)&tf_hHC%0MB znL|CRCPC}SeuVTdf>-QV70`0(EHizc21s^sU>y%hW0t!0&y<7}Wi-wGy>m%(-jsDj zP?mF|>p_K>liZ6ZP(w5(|9Ga%>tLgb$|doDDfkdW>Z z`)>V2XC?NJT26mL^@ zf+IKr27TfM!UbZ@?zRddC7#6ss1sw%CXJ4FWC+t3lHZupzM77m^=9 z&(a?-LxIq}*nvv)y?27lZ{j zifdl9hyJudyP2LpU$-kXctshbJDKS{WfulP5Dk~xU4Le4c#h^(YjJit4#R8_khheS z|8(>2ibaHES4+J|DBM7I#QF5u-*EdN{n=Kt@4Zt?@Tv{JZA{`4 zU#kYOv{#A&gGPwT+$Ud}AXlK3K7hYzo$(fBSFjrP{QQ zeaKg--L&jh$9N}`pu{Bs>?eDFPaWY4|9|foN%}i;3%;@4{dc+iw>m}{3rELqH21G! z`8@;w-zsJ1H(N3%|1B@#ioLOjib)j`EiJqPQVSbPSPVHCj6t5J&(NcWzBrzCiDt{4 zdlPAUKldz%6x5II1H_+jv)(xVL+a;P+-1hv_pM>gMRr%04@k;DTokASSKKhU1Qms| zrWh3a!b(J3n0>-tipg{a?UaKsP7?+|@A+1WPDiQIW1Sf@qDU~M_P65_s}7(gjTn0X zucyEm)o;f8UyshMy&>^SC3I|C6jR*R_GFwGranWZe*I>K+0k}pBuET&M~ z;Odo*ZcT?ZpduHyrf8E%IBFtv;JQ!N_m>!sV6ly$_1D{(&nO~w)G~Y`7sD3#hQk%^ zp}ucDF_$!6DAz*PM8yE(&~;%|=+h(Rn-=1Wykas_-@d&z#=S}rDf`4w(rVlcF&lF! 
z=1)M3YVz7orwk^BXhslJ8jR);sh^knJW(Qmm(QdSgIAIdlN4Te5KJisifjr?eB{FjAX1a0AB>d?qY4Wx>BZ8&}5K0fA+d{l8 z?^s&l8#j7pR&ijD?0b%;lL9l$P_mi2^*_OL+b}4kuLR$GAf85sOo02?Y#90}CCDiS zZ%rbCw>=H~CBO=C_JVV=xgDe%b4FaEFtuS7Q1##y686r%F6I)s-~2(}PWK|Z8M+Gu zl$y~5@#0Ka%$M<&Cv%L`a8X^@tY&T7<0|(6dNT=EsRe0%kp1Qyq!^43VAKYnr*A5~ zsI%lK1ewqO;0TpLrT9v}!@vJK{QoVa_+N4FYT#h?Y8rS1S&-G+m$FNMP?(8N`MZP zels(*?kK{{^g9DOzkuZXJ2;SrOQsp9T$hwRB1(phw1c7`!Q!by?Q#YsSM#I12RhU{$Q+{xj83axHcftEc$mNJ8_T7A-BQc*k(sZ+~NsO~xAA zxnbb%dam_fZlHvW7fKXrB~F&jS<4FD2FqY?VG?ix*r~MDXCE^WQ|W|WM;gsIA4lQP zJ2hAK@CF*3*VqPr2eeg6GzWFlICi8S>nO>5HvWzyZTE)hlkdC_>pBej*>o0EOHR|) z$?};&I4+_?wvL*g#PJ9)!bc#9BJu1(*RdNEn>#Oxta(VWeM40ola<0aOe2kSS~{^P zDJBd}0L-P#O-CzX*%+$#v;(x%<*SPgAje=F{Zh-@ucd2DA(yC|N_|ocs*|-!H%wEw z@Q!>siv2W;C^^j^59OAX03&}&D*W4EjCvfi(ygcL#~t8XGa#|NPO+*M@Y-)ctFA@I z-p7npT1#5zOLo>7q?aZpCZ=iecn3QYklP;gF0bq@>oyBq94f6C=;Csw3PkZ|5q=(c zfs`aw?II0e(h=|7o&T+hq&m$; zBrE09Twxd9BJ2P+QPN}*OdZ-JZV7%av@OM7v!!NL8R;%WFq*?{9T3{ct@2EKgc8h) zMxoM$SaF#p<`65BwIDfmXG6+OiK0e)`I=!A3E`+K@61f}0e z!2a*FOaDrOe>U`q%K!QN`&=&0C~)CaL3R4VY(NDt{Xz(Xpqru5=r#uQN1L$Je1*dkdqQ*=lofQaN%lO!<5z9ZlHgxt|`THd>2 zsWfU$9=p;yLyJyM^t zS2w9w?Bpto`@H^xJpZDKR1@~^30Il6oFGfk5%g6w*C+VM)+%R@gfIwNprOV5{F^M2 zO?n3DEzpT+EoSV-%OdvZvNF+pDd-ZVZ&d8 zKeIyrrfPN=EcFRCPEDCVflX#3-)Ik_HCkL(ejmY8vzcf-MTA{oHk!R2*36`O68$7J zf}zJC+bbQk--9Xm!u#lgLvx8TXx2J258E5^*IZ(FXMpq$2LUUvhWQPs((z1+2{Op% z?J}9k5^N=z;7ja~zi8a_-exIqWUBJwohe#4QJ`|FF*$C{lM18z^#hX6!5B8KAkLUX ziP=oti-gpV(BsLD{0(3*dw}4JxK23Y7M{BeFPucw!sHpY&l%Ws4pSm`+~V7;bZ%Dx zeI)MK=4vC&5#;2MT7fS?^ch9?2;%<8Jlu-IB&N~gg8t;6S-#C@!NU{`p7M8@2iGc& zg|JPg%@gCoCQ&s6JvDU&`X2S<57f(k8nJ1wvBu{8r?;q3_kpZZ${?|( z+^)UvR33sjSd)aT!UPkA;ylO6{aE3MQa{g%Mcf$1KONcjO@&g5zPHWtzM1rYC{_K> zgQNcs<{&X{OA=cEWw5JGqpr0O>x*Tfak2PE9?FuWtz^DDNI}rwAaT0(bdo-<+SJ6A z&}S%boGMWIS0L}=S>|-#kRX;e^sUsotry(MjE|3_9duvfc|nwF#NHuM-w7ZU!5ei8 z6Mkf>2)WunY2eU@C-Uj-A zG(z0Tz2YoBk>zCz_9-)4a>T46$(~kF+Y{#sA9MWH%5z#zNoz)sdXq7ZR_+`RZ%0(q zC7&GyS_|BGHNFl8Xa%@>iWh%Gr?=J5<(!OEjauj5jyrA-QXBjn0OAhJJ9+v=!LK`` 
z@g(`^*84Q4jcDL`OA&ZV60djgwG`|bcD*i50O}Q{9_noRg|~?dj%VtKOnyRs$Uzqg z191aWoR^rDX#@iSq0n z?9Sg$WSRPqSeI<}&n1T3!6%Wj@5iw5`*`Btni~G=&;J+4`7g#OQTa>u`{4ZZ(c@s$ zK0y;ySOGD-UTjREKbru{QaS>HjN<2)R%Nn-TZiQ(Twe4p@-saNa3~p{?^V9Nixz@a zykPv~<@lu6-Ng9i$Lrk(xi2Tri3q=RW`BJYOPC;S0Yly%77c727Yj-d1vF!Fuk{Xh z)lMbA69y7*5ufET>P*gXQrxsW+ zz)*MbHZv*eJPEXYE<6g6_M7N%#%mR{#awV3i^PafNv(zyI)&bH?F}2s8_rR(6%!V4SOWlup`TKAb@ee>!9JKPM=&8g#BeYRH9FpFybxBXQI2|g}FGJfJ+ zY-*2hB?o{TVL;Wt_ek;AP5PBqfDR4@Z->_182W z{P@Mc27j6jE*9xG{R$>6_;i=y{qf(c`5w9fa*`rEzX6t!KJ(p1H|>J1pC-2zqWENF zmm=Z5B4u{cY2XYl(PfrInB*~WGWik3@1oRhiMOS|D;acnf-Bs(QCm#wR;@Vf!hOPJ zgjhDCfDj$HcyVLJ=AaTbQ{@vIv14LWWF$=i-BDoC11}V;2V8A`S>_x)vIq44-VB-v z*w-d}$G+Ql?En8j!~ZkCpQ$|cA0|+rrY>tiCeWxkRGPoarxlGU2?7%k#F693RHT24 z-?JsiXlT2PTqZqNb&sSc>$d;O4V@|b6VKSWQb~bUaWn1Cf0+K%`Q&Wc<>mQ>*iEGB zbZ;aYOotBZ{vH3y<0A*L0QVM|#rf*LIsGx(O*-7)r@yyBIzJnBFSKBUSl1e|8lxU* zzFL+YDVVkIuzFWeJ8AbgN&w(4-7zbiaMn{5!JQXu)SELk*CNL+Fro|2v|YO)1l15t zs(0^&EB6DPMyaqvY>=KL>)tEpsn;N5Q#yJj<9}ImL((SqErWN3Q=;tBO~ExTCs9hB z2E$7eN#5wX4<3m^5pdjm#5o>s#eS_Q^P)tm$@SawTqF*1dj_i#)3};JslbLKHXl_N z)Fxzf>FN)EK&Rz&*|6&%Hs-^f{V|+_vL1S;-1K-l$5xiC@}%uDuwHYhmsV?YcOUlk zOYkG5v2+`+UWqpn0aaaqrD3lYdh0*!L`3FAsNKu=Q!vJu?Yc8n|CoYyDo_`r0mPoo z8>XCo$W4>l(==h?2~PoRR*kEe)&IH{1sM41mO#-36`02m#nTX{r*r`Q5rZ2-sE|nA zhnn5T#s#v`52T5|?GNS`%HgS2;R(*|^egNPDzzH_z^W)-Q98~$#YAe)cEZ%vge965AS_am#DK#pjPRr-!^za8>`kksCAUj(Xr*1NW5~e zpypt_eJpD&4_bl_y?G%>^L}=>xAaV>KR6;^aBytqpiHe%!j;&MzI_>Sx7O%F%D*8s zSN}cS^<{iiK)=Ji`FpO#^zY!_|D)qeRNAtgmH)m;qC|mq^j(|hL`7uBz+ULUj37gj zksdbnU+LSVo35riSX_4z{UX=%n&}7s0{WuZYoSfwAP`8aKN9P@%e=~1`~1ASL-z%# zw>DO&ixr}c9%4InGc*_y42bdEk)ZdG7-mTu0bD@_vGAr*NcFoMW;@r?@LUhRI zCUJgHb`O?M3!w)|CPu~ej%fddw20lod?Ufp8Dmt0PbnA0J%KE^2~AIcnKP()025V> zG>noSM3$5Btmc$GZoyP^v1@Poz0FD(6YSTH@aD0}BXva?LphAiSz9f&Y(aDAzBnUh z?d2m``~{z;{}kZJ>a^wYI?ry(V9hIoh;|EFc0*-#*`$T0DRQ1;WsqInG;YPS+I4{g zJGpKk%%Sdc5xBa$Q^_I~(F97eqDO7AN3EN0u)PNBAb+n+ zWBTxQx^;O9o0`=g+Zrt_{lP!sgWZHW?8bLYS$;1a@&7w9rD9|Ge;Gb?sEjFoF9-6v 
z#!2)t{DMHZ2@0W*fCx;62d#;jouz`R5Y(t{BT=$N4yr^^o$ON8d{PQ=!O zX17^CrdM~7D-;ZrC!||<+FEOxI_WI3CA<35va%4v>gc zEX-@h8esj=a4szW7x{0g$hwoWRQG$yK{@3mqd-jYiVofJE!Wok1* znV7Gm&Ssq#hFuvj1sRyHg(6PFA5U*Q8Rx>-blOs=lb`qa{zFy&n4xY;sd$fE+<3EI z##W$P9M{B3c3Si9gw^jlPU-JqD~Cye;wr=XkV7BSv#6}DrsXWFJ3eUNrc%7{=^sP> zrp)BWKA9<}^R9g!0q7yWlh;gr_TEOD|#BmGq<@IV;ueg+D2}cjpp+dPf&Q(36sFU&K8}hA85U61faW&{ zlB`9HUl-WWCG|<1XANN3JVAkRYvr5U4q6;!G*MTdSUt*Mi=z_y3B1A9j-@aK{lNvx zK%p23>M&=KTCgR!Ee8c?DAO2_R?B zkaqr6^BSP!8dHXxj%N1l+V$_%vzHjqvu7p@%Nl6;>y*S}M!B=pz=aqUV#`;h%M0rU zHfcog>kv3UZAEB*g7Er@t6CF8kHDmKTjO@rejA^ULqn!`LwrEwOVmHx^;g|5PHm#B zZ+jjWgjJ!043F+&#_;D*mz%Q60=L9Ove|$gU&~As5^uz@2-BfQ!bW)Khn}G+Wyjw- z19qI#oB(RSNydn0t~;tAmK!P-d{b-@@E5|cdgOS#!>%#Rj6ynkMvaW@37E>@hJP^8 z2zk8VXx|>#R^JCcWdBCy{0nPmYFOxN55#^-rlqobe0#L6)bi?E?SPymF*a5oDDeSd zO0gx?#KMoOd&G(2O@*W)HgX6y_aa6iMCl^~`{@UR`nMQE`>n_{_aY5nA}vqU8mt8H z`oa=g0SyiLd~BxAj2~l$zRSDHxvDs;I4>+M$W`HbJ|g&P+$!U7-PHX4RAcR0szJ*( ze-417=bO2q{492SWrqDK+L3#ChUHtz*@MP)e^%@>_&#Yk^1|tv@j4%3T)diEX zATx4K*hcO`sY$jk#jN5WD<=C3nvuVsRh||qDHnc~;Kf59zr0;c7VkVSUPD%NnnJC_ zl3F^#f_rDu8l}l8qcAz0FFa)EAt32IUy_JLIhU_J^l~FRH&6-ivSpG2PRqzDdMWft>Zc(c)#tb%wgmWN%>IOPm zZi-noqS!^Ftb81pRcQi`X#UhWK70hy4tGW1mz|+vI8c*h@ zfFGJtW3r>qV>1Z0r|L>7I3un^gcep$AAWfZHRvB|E*kktY$qQP_$YG60C@X~tTQjB3%@`uz!qxtxF+LE!+=nrS^07hn` zEgAp!h|r03h7B!$#OZW#ACD+M;-5J!W+{h|6I;5cNnE(Y863%1(oH}_FTW})8zYb$7czP zg~Szk1+_NTm6SJ0MS_|oSz%e(S~P-&SFp;!k?uFayytV$8HPwuyELSXOs^27XvK-D zOx-Dl!P|28DK6iX>p#Yb%3`A&CG0X2S43FjN%IB}q(!hC$fG}yl1y9W&W&I@KTg6@ zK^kpH8=yFuP+vI^+59|3%Zqnb5lTDAykf z9S#X`3N(X^SpdMyWQGOQRjhiwlj!0W-yD<3aEj^&X%=?`6lCy~?`&WSWt z?U~EKFcCG_RJ(Qp7j=$I%H8t)Z@6VjA#>1f@EYiS8MRHZphp zMA_5`znM=pzUpBPO)pXGYpQ6gkine{6u_o!P@Q+NKJ}k!_X7u|qfpAyIJb$_#3@wJ z<1SE2Edkfk9C!0t%}8Yio09^F`YGzpaJHGk*-ffsn85@)%4@`;Fv^8q(-Wk7r=Q8p zT&hD`5(f?M{gfzGbbwh8(}G#|#fDuk7v1W)5H9wkorE0ZZjL0Q1=NRGY>zwgfm81DdoaVwNH;or{{eSyybt)m<=zXoA^RALYG-2t zouH|L*BLvmm9cdMmn+KGopyR@4*=&0&4g|FLoreZOhRmh=)R0bg~ 
zT2(8V_q7~42-zvb)+y959OAv!V$u(O3)%Es0M@CRFmG{5sovIq4%8Ahjk#*5w{+)+ zMWQoJI_r$HxL5km1#6(e@{lK3Udc~n0@g`g$s?VrnQJ$!oPnb?IHh-1qA`Rz$)Ai< z6w$-MJW-gKNvOhL+XMbE7&mFt`x1KY>k4(!KbbpZ`>`K@1J<(#vVbjx@Z@(6Q}MF# zMnbr-f55(cTa^q4+#)=s+ThMaV~E`B8V=|W_fZWDwiso8tNMTNse)RNBGi=gVwgg% zbOg8>mbRN%7^Um-7oj4=6`$|(K7!+t^90a{$18Z>}<#!bm%ZEFQ{X(yBZMc>lCz0f1I2w9Sq zuGh<9<=AO&g6BZte6hn>Qmvv;Rt)*cJfTr2=~EnGD8P$v3R|&1RCl&7)b+`=QGapi zPbLg_pxm`+HZurtFZ;wZ=`Vk*do~$wB zxoW&=j0OTbQ=Q%S8XJ%~qoa3Ea|au5o}_(P;=!y-AjFrERh%8la!z6Fn@lR?^E~H12D?8#ht=1F;7@o4$Q8GDj;sSC%Jfn01xgL&%F2 zwG1|5ikb^qHv&9hT8w83+yv&BQXOQyMVJSBL(Ky~p)gU3#%|blG?IR9rP^zUbs7rOA0X52Ao=GRt@C&zlyjNLv-} z9?*x{y(`509qhCV*B47f2hLrGl^<@SuRGR!KwHei?!CM10Tq*YDIoBNyRuO*>3FU? zHjipIE#B~y3FSfOsMfj~F9PNr*H?0oHyYB^G(YyNh{SxcE(Y-`x5jFMKb~HO*m+R% zrq|ic4fzJ#USpTm;X7K+E%xsT_3VHKe?*uc4-FsILUH;kL>_okY(w`VU*8+l>o>Jm ziU#?2^`>arnsl#)*R&nf_%>A+qwl%o{l(u)M?DK1^mf260_oteV3#E_>6Y4!_hhVD zM8AI6MM2V*^_M^sQ0dmHu11fy^kOqXqzpr?K$`}BKWG`=Es(9&S@K@)ZjA{lj3ea7_MBP zk(|hBFRjHVMN!sNUkrB;(cTP)T97M$0Dtc&UXSec<+q?y>5=)}S~{Z@ua;1xt@=T5 zI7{`Z=z_X*no8s>mY;>BvEXK%b`a6(DTS6t&b!vf_z#HM{Uoy_5fiB(zpkF{})ruka$iX*~pq1ZxD?q68dIo zIZSVls9kFGsTwvr4{T_LidcWtt$u{kJlW7moRaH6+A5hW&;;2O#$oKyEN8kx`LmG)Wfq4ykh+q{I3|RfVpkR&QH_x;t41Uw z`P+tft^E2B$domKT@|nNW`EHwyj>&}K;eDpe z1bNOh=fvIfk`&B61+S8ND<(KC%>y&?>opCnY*r5M+!UrWKxv0_QvTlJc>X#AaI^xo zaRXL}t5Ej_Z$y*|w*$6D+A?Lw-CO-$itm^{2Ct82-<0IW)0KMNvJHgBrdsIR0v~=H z?n6^}l{D``Me90`^o|q!olsF?UX3YSq^6Vu>Ijm>>PaZI8G@<^NGw{Cx&%|PwYrfw zR!gX_%AR=L3BFsf8LxI|K^J}deh0ZdV?$3r--FEX`#INxsOG6_=!v)DI>0q|BxT)z z-G6kzA01M?rba+G_mwNMQD1mbVbNTWmBi*{s_v_Ft9m2Avg!^78(QFu&n6mbRJ2bA zv!b;%yo{g*9l2)>tsZJOOp}U~8VUH`}$ z8p_}t*XIOehezolNa-a2x0BS})Y9}&*TPgua{Ewn-=wVrmJUeU39EKx+%w%=ixQWK zDLpwaNJs65#6o7Ln7~~X+p_o2BR1g~VCfxLzxA{HlWAI6^H;`juI=&r1jQrUv_q0Z z1Ja-tjdktrrP>GOC*#p?*xfQU5MqjMsBe!9lh(u8)w$e@Z|>aUHI5o;MGw*|Myiz3 z-f0;pHg~Q#%*Kx8MxH%AluVXjG2C$)WL-K63@Q`#y9_k_+}eR(x4~dp7oV-ek0H>I zgy8p#i4GN{>#v=pFYUQT(g&b$OeTy-X_#FDgNF8XyfGY6R!>inYn8IR2RDa&O!(6< 
znXs{W!bkP|s_YI*Yx%4stI`=ZO45IK6rBs`g7sP40ic}GZ58s?Mc$&i`kq_tfci>N zIHrC0H+Qpam1bNa=(`SRKjixBTtm&e`j9porEci!zdlg1RI0Jw#b(_Tb@RQK1Zxr_ z%7SUeH6=TrXt3J@js`4iDD0=IoHhK~I7^W8^Rcp~Yaf>2wVe|Hh1bUpX9ATD#moByY57-f2Ef1TP^lBi&p5_s7WGG9|0T}dlfxOx zXvScJO1Cnq`c`~{Dp;{;l<-KkCDE+pmexJkd}zCgE{eF=)K``-qC~IT6GcRog_)!X z?fK^F8UDz$(zFUrwuR$qro5>qqn>+Z%<5>;_*3pZ8QM|yv9CAtrAx;($>4l^_$_-L z*&?(77!-=zvnCVW&kUcZMb6;2!83si518Y%R*A3JZ8Is|kUCMu`!vxDgaWjs7^0j( ziTaS4HhQ)ldR=r)_7vYFUr%THE}cPF{0H45FJ5MQW^+W>P+eEX2kLp3zzFe*-pFVA zdDZRybv?H|>`9f$AKVjFWJ=wegO7hOOIYCtd?Vj{EYLT*^gl35|HQ`R=ti+ADm{jyQE7K@kdjuqJhWVSks>b^ zxha88-h3s;%3_5b1TqFCPTxVjvuB5U>v=HyZ$?JSk+&I%)M7KE*wOg<)1-Iy)8-K! z^XpIt|0ibmk9RtMmlUd7#Ap3Q!q9N4atQy)TmrhrFhfx1DAN`^vq@Q_SRl|V z#lU<~n67$mT)NvHh`%als+G-)x1`Y%4Bp*6Un5Ri9h=_Db zA-AdP!f>f0m@~>7X#uBM?diI@)Egjuz@jXKvm zJo+==juc9_<;CqeRaU9_Mz@;3e=E4=6TK+c`|uu#pIqhSyNm`G(X)&)B`8q0RBv#> z`gGlw(Q=1Xmf55VHj%C#^1lpc>LY8kfA@|rlC1EA<1#`iuyNO z(=;irt{_&K=i4)^x%;U(Xv<)+o=dczC5H3W~+e|f~{*ucxj@{Yi-cw^MqYr3fN zF5D+~!wd$#al?UfMnz(@K#wn`_5na@rRr8XqN@&M&FGEC@`+OEv}sI1hw>Up0qAWf zL#e4~&oM;TVfjRE+10B_gFlLEP9?Q-dARr3xi6nQqnw>k-S;~b z;!0s2VS4}W8b&pGuK=7im+t(`nz@FnT#VD|!)eQNp-W6)@>aA+j~K*H{$G`y2|QHY z|Hmy+CR@#jWY4~)lr1qBJB_RfHJFfP<}pK5(#ZZGSqcpyS&}01LnTWk5fzmXMGHkJ zTP6L^B+uj;lmB_W<~4=${+v0>z31M!-_O@o-O9GyW)j_mjx}!0@br_LE-7SIuPP84 z;5=O(U*g_um0tyG|61N@d9lEuOeiRd+#NY^{nd5;-CVlw&Ap7J?qwM^?E29wvS}2d zbzar4Fz&RSR(-|s!Z6+za&Z zY#D<5q_JUktIzvL0)yq_kLWG6DO{ri=?c!y!f(Dk%G{8)k`Gym%j#!OgXVDD3;$&v@qy#ISJfp=Vm>pls@9-mapVQChAHHd-x+OGx)(*Yr zC1qDUTZ6mM(b_hi!TuFF2k#8uI2;kD70AQ&di$L*4P*Y-@p`jdm%_c3f)XhYD^6M8&#Y$ZpzQMcR|6nsH>b=*R_Von!$BTRj7yGCXokoAQ z&ANvx0-Epw`QIEPgI(^cS2f(Y85yV@ygI{ewyv5Frng)e}KCZF7JbR(&W618_dcEh(#+^zZFY;o<815<5sOHQdeax9_!PyM&;{P zkBa5xymca0#)c#tke@3KNEM8a_mT&1gm;p&&JlMGH(cL(b)BckgMQ^9&vRwj!~3@l zY?L5}=Jzr080OGKb|y`ee(+`flQg|!lo6>=H)X4`$Gz~hLmu2a%kYW_Uu8x09Pa0J zKZ`E$BKJ=2GPj_3l*TEcZ*uYRr<*J^#5pILTT;k_cgto1ZL-%slyc16J~OH-(RgDA z%;EjEnoUkZ&acS{Q8`{i6T5^nywgqQI5bDIymoa7CSZG|WWVk>GM9)zy*bNih|QIm 
z%0+(Nnc*a_xo;$=!HQYaapLms>J1ToyjtFByY`C2H1wT#178#4+|{H0BBqtCdd$L% z_3Hc60j@{t9~MjM@LBalR&6@>B;9?r<7J~F+WXyYu*y3?px*=8MAK@EA+jRX8{CG?GI-< z54?Dc9CAh>QTAvyOEm0^+x;r2BWX|{3$Y7)L5l*qVE*y0`7J>l2wCmW zL1?|a`pJ-l{fb_N;R(Z9UMiSj6pQjOvQ^%DvhIJF!+Th7jO2~1f1N+(-TyCFYQZYw z4)>7caf^Ki_KJ^Zx2JUb z&$3zJy!*+rCV4%jqwyuNY3j1ZEiltS0xTzd+=itTb;IPYpaf?8Y+RSdVdpacB(bVQ zC(JupLfFp8y43%PMj2}T|VS@%LVp>hv4Y!RPMF?pp8U_$xCJ)S zQx!69>bphNTIb9yn*_yfj{N%bY)t{L1cs8<8|!f$;UQ*}IN=2<6lA;x^(`8t?;+ST zh)z4qeYYgZkIy{$4x28O-pugO&gauRh3;lti9)9Pvw+^)0!h~%m&8Q!AKX%urEMnl z?yEz?g#ODn$UM`+Q#$Q!6|zsq_`dLO5YK-6bJM6ya>}H+vnW^h?o$z;V&wvuM$dR& zeEq;uUUh$XR`TWeC$$c&Jjau2it3#%J-y}Qm>nW*s?En?R&6w@sDXMEr#8~$=b(gk zwDC3)NtAP;M2BW_lL^5ShpK$D%@|BnD{=!Tq)o(5@z3i7Z){} zGr}Exom_qDO{kAVkZ*MbLNHE666Kina#D{&>Jy%~w7yX$oj;cYCd^p9zy z8*+wgSEcj$4{WxKmCF(5o7U4jqwEvO&dm1H#7z}%VXAbW&W24v-tS6N3}qrm1OnE)fUkoE8yMMn9S$?IswS88tQWm4#Oid#ckgr6 zRtHm!mfNl-`d>O*1~d7%;~n+{Rph6BBy^95zqI{K((E!iFQ+h*C3EsbxNo_aRm5gj zKYug($r*Q#W9`p%Bf{bi6;IY0v`pB^^qu)gbg9QHQ7 zWBj(a1YSu)~2RK8Pi#C>{DMlrqFb9e_RehEHyI{n?e3vL_}L>kYJC z_ly$$)zFi*SFyNrnOt(B*7E$??s67EO%DgoZL2XNk8iVx~X_)o++4oaK1M|ou73vA0K^503j@uuVmLcHH4ya-kOIDfM%5%(E z+Xpt~#7y2!KB&)PoyCA+$~DXqxPxxALy!g-O?<9+9KTk4Pgq4AIdUkl`1<1#j^cJg zgU3`0hkHj_jxV>`Y~%LAZl^3o0}`Sm@iw7kwff{M%VwtN)|~!p{AsfA6vB5UolF~d zHWS%*uBDt<9y!9v2Xe|au&1j&iR1HXCdyCjxSgG*L{wmTD4(NQ=mFjpa~xooc6kju z`~+d{j7$h-;HAB04H!Zscu^hZffL#9!p$)9>sRI|Yovm)g@F>ZnosF2EgkU3ln0bR zTA}|+E(tt)!SG)-bEJi_0m{l+(cAz^pi}`9=~n?y&;2eG;d9{M6nj>BHGn(KA2n|O zt}$=FPq!j`p&kQ8>cirSzkU0c08%8{^Qyqi-w2LoO8)^E7;;I1;HQ6B$u0nNaX2CY zSmfi)F`m94zL8>#zu;8|{aBui@RzRKBlP1&mfFxEC@%cjl?NBs`cr^nm){>;$g?rhKr$AO&6qV_Wbn^}5tfFBry^e1`%du2~o zs$~dN;S_#%iwwA_QvmMjh%Qo?0?rR~6liyN5Xmej8(*V9ym*T`xAhHih-v$7U}8=dfXi2i*aAB!xM(Xekg*ix@r|ymDw*{*s0?dlVys2e)z62u1 z+k3esbJE=-P5S$&KdFp+2H7_2e=}OKDrf( z9-207?6$@f4m4B+9E*e((Y89!q?zH|mz_vM>kp*HGXldO0Hg#!EtFhRuOm$u8e~a9 z5(roy7m$Kh+zjW6@zw{&20u?1f2uP&boD}$#Zy)4o&T;vyBoqFiF2t;*g=|1=)PxB z8eM3Mp=l_obbc?I^xyLz?4Y1YDWPa+nm;O<$Cn;@ane616`J9OO2r=rZr{I_Kizyc 
zP#^^WCdIEp*()rRT+*YZK>V@^Zs=ht32x>Kwe zab)@ZEffz;VM4{XA6e421^h~`ji5r%)B{wZu#hD}f3$y@L0JV9f3g{-RK!A?vBUA}${YF(vO4)@`6f1 z-A|}e#LN{)(eXloDnX4Vs7eH|<@{r#LodP@Nz--$Dg_Par%DCpu2>2jUnqy~|J?eZ zBG4FVsz_A+ibdwv>mLp>P!(t}E>$JGaK$R~;fb{O3($y1ssQQo|5M;^JqC?7qe|hg zu0ZOqeFcp?qVn&Qu7FQJ4hcFi&|nR!*j)MF#b}QO^lN%5)4p*D^H+B){n8%VPUzi! zDihoGcP71a6!ab`l^hK&*dYrVYzJ0)#}xVrp!e;lI!+x+bfCN0KXwUAPU9@#l7@0& QuEJmfE|#`Dqx|px0L@K;Y5)KL diff --git a/plugins/examples/gradlew b/plugins/examples/gradlew index 1b6c787337ffb..f5feea6d6b116 100755 --- a/plugins/examples/gradlew +++ b/plugins/examples/gradlew @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# ############################################################################## # @@ -55,7 +57,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -80,13 +82,12 @@ do esac done -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -APP_NAME="Gradle" +# This is normally unused +# shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. 
MAX_FD=maximum @@ -133,22 +134,29 @@ location of your Java installation." fi else JAVACMD=java - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." + fi fi # Increase the maximum file descriptors if we can. if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac case $MAX_FD in #( '' | soft) :;; #( *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -193,11 +201,15 @@ if "$cygwin" || "$msys" ; then done fi -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. 
set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ @@ -205,6 +217,12 @@ set -- \ org.gradle.wrapper.GradleWrapperMain \ "$@" +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + # Use "xargs" to parse quoted args. # # With -n1 it outputs one arg per line, with the quotes and backslashes removed. diff --git a/plugins/examples/gradlew.bat b/plugins/examples/gradlew.bat index ac1b06f93825d..9b42019c7915b 100644 --- a/plugins/examples/gradlew.bat +++ b/plugins/examples/gradlew.bat @@ -13,8 +13,10 @@ @rem See the License for the specific language governing permissions and @rem limitations under the License. @rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem -@if "%DEBUG%" == "" @echo off +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -25,7 +27,8 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @@ -40,13 +43,13 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute +if %ERRORLEVEL% equ 0 goto execute -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail @@ -56,11 +59,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto execute -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. 
-echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail @@ -75,13 +78,15 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal From 00a5c6d84d8ee8cd723ccde69ed61f3700a9ecba Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Wed, 4 Sep 2024 12:16:22 -0400 Subject: [PATCH 044/115] Revert "[ML] Return both modelId and inferenceId (#111490)" (#112508) Both `model_id` and `inference_id` were returned from the inference API to support backwards compatibility while kibana migrates to `inference_id`. Now that the migration is complete, we will stop returning `model_id`. This reverts commit 3b8970cf12484d5c1ea3e484e60c34e083887988. 
--- docs/changelog/112508.yaml | 5 +++++ .../org/elasticsearch/inference/ModelConfigurations.java | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/112508.yaml diff --git a/docs/changelog/112508.yaml b/docs/changelog/112508.yaml new file mode 100644 index 0000000000000..3945ebd226ac4 --- /dev/null +++ b/docs/changelog/112508.yaml @@ -0,0 +1,5 @@ +pr: 112508 +summary: "[ML] Create Inference API will no longer return model_id and now only return inference_id" +area: Machine Learning +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java index ce83a7c1b6c79..0df0378c4a5f4 100644 --- a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java +++ b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java @@ -126,7 +126,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (params.paramAsBoolean(USE_ID_FOR_INDEX, false)) { builder.field(INDEX_ONLY_ID_FIELD_NAME, inferenceEntityId); } else { - builder.field(INDEX_ONLY_ID_FIELD_NAME, inferenceEntityId); builder.field(INFERENCE_ID_FIELD_NAME, inferenceEntityId); } builder.field(TaskType.NAME, taskType.toString()); @@ -143,7 +142,6 @@ public XContentBuilder toFilteredXContent(XContentBuilder builder, Params params if (params.paramAsBoolean(USE_ID_FOR_INDEX, false)) { builder.field(INDEX_ONLY_ID_FIELD_NAME, inferenceEntityId); } else { - builder.field(INDEX_ONLY_ID_FIELD_NAME, inferenceEntityId); builder.field(INFERENCE_ID_FIELD_NAME, inferenceEntityId); } builder.field(TaskType.NAME, taskType.toString()); From 1d7e7bd2b6d5ce37e01fbdc6fff7933ce5a7a8d0 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 4 Sep 2024 19:25:28 +0200 Subject: [PATCH 045/115] Lower zstd blockDocCount for best_speed from 128 to 96 (#112098) Query latencies are more important for best speed use cases. 
Zstd stored fields have shown some increase in query latencies when limited number of stored field data is fetched (in case of get by id like queries). By decreasing the blockDocCount we try to reduce these latencies at the cost of using a little bit more storage. Based on the shared results below, setting blockDocCount to 96. Note that zstd is still behind a feature flag and this is the last things we like to tune before removing the feature flag. In all other cases zstd has shown to be the better trade of compared to lz4 and deflate (which get used depending on index.codec setting). After merging this change, we should monitor the nightly benchmarks and see the total effect before removing the feature flag. --- .../index/codec/zstd/Zstd814StoredFieldsFormat.java | 2 +- .../src/test/java/org/elasticsearch/index/codec/CodecTests.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java index 840b37611374a..c1ee9cab01106 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java @@ -47,7 +47,7 @@ public final class Zstd814StoredFieldsFormat extends Lucene90CompressingStoredFi public static final String MODE_KEY = Zstd814StoredFieldsFormat.class.getSimpleName() + ".mode"; public enum Mode { - BEST_SPEED(0, BEST_SPEED_BLOCK_SIZE, 128), + BEST_SPEED(0, BEST_SPEED_BLOCK_SIZE, 96), BEST_COMPRESSION(3, BEST_COMPRESSION_BLOCK_SIZE, 2048); final int level, blockSizeInBytes, blockDocCount; diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index 002e42b3198e6..c56ef138724d6 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -58,7 +58,7 @@ public void testDefault() throws Exception { assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); Codec codec = createCodecService().codec("default"); assertEquals( - "Zstd814StoredFieldsFormat(compressionMode=ZSTD(level=0), chunkSize=14336, maxDocsPerChunk=128, blockShift=10)", + "Zstd814StoredFieldsFormat(compressionMode=ZSTD(level=0), chunkSize=14336, maxDocsPerChunk=96, blockShift=10)", codec.storedFieldsFormat().toString() ); } From 827e90da506724bcc160cec46eda973fc36ad721 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 2024 19:46:11 +0200 Subject: [PATCH 046/115] Misc cleanups for query phase (#112521) Avoiding some redundant computation in obvious spots, fixing compile warnings and using a more specific listener in one spot to save memory and indirection. --- .../query/FilteredSearchExecutionContext.java | 3 +- .../index/query/InnerHitsRewriteContext.java | 3 +- .../index/query/QueryRewriteContext.java | 5 ++- .../index/query/SearchExecutionContext.java | 3 +- .../search/DefaultSearchContext.java | 15 +++++---- .../elasticsearch/search/SearchService.java | 31 +++++++++---------- .../search/internal/ShardSearchRequest.java | 29 ++++++++--------- .../AggregatorFactoriesTests.java | 8 ++--- 8 files changed, 45 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java index 26415a3d0e777..9140409bf88a1 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java @@ -239,8 +239,7 @@ public void registerAsyncAction(BiConsumer> asyncActio } @Override - @SuppressWarnings("rawtypes") - public void 
executeAsyncActions(ActionListener listener) { + public void executeAsyncActions(ActionListener listener) { in.executeAsyncActions(listener); } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java index 0b437fa451e1b..c5943f571d0d3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java @@ -26,8 +26,7 @@ public InnerHitsRewriteContext convertToInnerHitsRewriteContext() { } @Override - @SuppressWarnings({ "rawtypes" }) - public void executeAsyncActions(ActionListener listener) { + public void executeAsyncActions(ActionListener listener) { // InnerHitsRewriteContext does not support async actions at all, and doesn't supply a valid `client` object throw new UnsupportedOperationException("InnerHitsRewriteContext does not support async actions"); } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 9537aeec6a219..90633e1365ba1 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -283,13 +283,12 @@ public boolean hasAsyncActions() { * Executes all registered async actions and notifies the listener once it's done. The value that is passed to the listener is always * null. The list of registered actions is cleared once this method returns. 
*/ - @SuppressWarnings({ "unchecked", "rawtypes" }) - public void executeAsyncActions(ActionListener listener) { + public void executeAsyncActions(ActionListener listener) { if (asyncActions.isEmpty()) { listener.onResponse(null); } else { CountDown countDown = new CountDown(asyncActions.size()); - ActionListener internalListener = new ActionListener() { + ActionListener internalListener = new ActionListener<>() { @Override public void onResponse(Object o) { if (countDown.countDown()) { diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index 106a5e811c48d..514cb7e88013f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -615,8 +615,7 @@ public void registerAsyncAction(BiConsumer> asyncActio } @Override - @SuppressWarnings("rawtypes") - public void executeAsyncActions(ActionListener listener) { + public void executeAsyncActions(ActionListener listener) { failIfFrozen(); super.executeAsyncActions(listener); } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index dc92cfd11fce3..ade79bd0d824c 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -179,13 +179,6 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); - int maximumNumberOfSlices = determineMaximumNumberOfSlices( - executor, - request, - resultsType, - enableQueryPhaseParallelCollection, - field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader()) - ); if (executor == null) { 
this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), @@ -202,7 +195,13 @@ final class DefaultSearchContext extends SearchContext { engineSearcher.getQueryCachingPolicy(), lowLevelCancellation, executor, - maximumNumberOfSlices, + determineMaximumNumberOfSlices( + executor, + request, + resultsType, + enableQueryPhaseParallelCollection, + field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader()) + ), minimumDocsPerSlice ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 9dc44d5f66948..6f70938a1e5e3 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1779,14 +1779,12 @@ private static boolean canMatchAfterRewrite(final ShardSearchRequest request, fi @SuppressWarnings("unchecked") public static boolean queryStillMatchesAfterRewrite(ShardSearchRequest request, QueryRewriteContext context) throws IOException { Rewriteable.rewrite(request.getRewriteable(), context, false); - boolean canMatch = request.getAliasFilter().getQueryBuilder() instanceof MatchNoneQueryBuilder == false; - if (canRewriteToMatchNone(request.source())) { - canMatch &= request.source() - .subSearches() - .stream() - .anyMatch(sqwb -> sqwb.getQueryBuilder() instanceof MatchNoneQueryBuilder == false); + if (request.getAliasFilter().getQueryBuilder() instanceof MatchNoneQueryBuilder) { + return false; } - return canMatch; + final var source = request.source(); + return canRewriteToMatchNone(source) == false + || source.subSearches().stream().anyMatch(sqwb -> sqwb.getQueryBuilder() instanceof MatchNoneQueryBuilder == false); } /** @@ -1806,19 +1804,18 @@ public static boolean canRewriteToMatchNone(SearchSourceBuilder source) { return aggregations == null || aggregations.mustVisitAllDocs() == false; } - @SuppressWarnings({ "rawtypes", 
"unchecked" }) + @SuppressWarnings("unchecked") private void rewriteAndFetchShardRequest(IndexShard shard, ShardSearchRequest request, ActionListener listener) { - ActionListener actionListener = listener.delegateFailureAndWrap((l, r) -> { - if (request.readerId() != null) { - l.onResponse(request); - } else { - shard.ensureShardSearchActive(b -> l.onResponse(request)); - } - }); // we also do rewrite on the coordinating node (TransportSearchService) but we also need to do it here. // AliasFilters and other things may need to be rewritten on the data node, but not per individual shard. - // These are uncommon-cases but we are very efficient doing the rewrite here. - Rewriteable.rewriteAndFetch(request.getRewriteable(), indicesService.getDataRewriteContext(request::nowInMillis), actionListener); + // These are uncommon-cases, but we are very efficient doing the rewrite here. + Rewriteable.rewriteAndFetch( + request.getRewriteable(), + indicesService.getDataRewriteContext(request::nowInMillis), + request.readerId() == null + ? listener.delegateFailureAndWrap((l, r) -> shard.ensureShardSearchActive(b -> l.onResponse(request))) + : listener.safeMap(r -> request) + ); } /** diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 488c956c187d5..a569915ae091d 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -587,22 +587,23 @@ public Rewriteable rewrite(QueryRewriteContext ctx) throws IOException { SearchSourceBuilder newSource = request.source() == null ? 
null : Rewriteable.rewrite(request.source(), ctx); AliasFilter newAliasFilter = Rewriteable.rewrite(request.getAliasFilter(), ctx); SearchExecutionContext searchExecutionContext = ctx.convertToSearchExecutionContext(); - FieldSortBuilder primarySort = FieldSortBuilder.getPrimaryFieldSortOrNull(newSource); - if (searchExecutionContext != null - && primarySort != null - && primarySort.isBottomSortShardDisjoint(searchExecutionContext, request.getBottomSortValues())) { - assert newSource != null : "source should contain a primary sort field"; - newSource = newSource.shallowCopy(); - int trackTotalHitsUpTo = SearchRequest.resolveTrackTotalHitsUpTo(request.scroll, request.source); - if (trackTotalHitsUpTo == TRACK_TOTAL_HITS_DISABLED && newSource.suggest() == null && newSource.aggregations() == null) { - newSource.query(new MatchNoneQueryBuilder()); - } else { - newSource.size(0); + if (searchExecutionContext != null) { + final FieldSortBuilder primarySort = FieldSortBuilder.getPrimaryFieldSortOrNull(newSource); + if (primarySort != null && primarySort.isBottomSortShardDisjoint(searchExecutionContext, request.getBottomSortValues())) { + assert newSource != null : "source should contain a primary sort field"; + newSource = newSource.shallowCopy(); + int trackTotalHitsUpTo = SearchRequest.resolveTrackTotalHitsUpTo(request.scroll, request.source); + if (trackTotalHitsUpTo == TRACK_TOTAL_HITS_DISABLED + && newSource.suggest() == null + && newSource.aggregations() == null) { + newSource.query(new MatchNoneQueryBuilder()); + } else { + newSource.size(0); + } + request.source(newSource); + request.setBottomSortValues(null); } - request.source(newSource); - request.setBottomSortValues(null); } - if (newSource == request.source() && newAliasFilter == request.getAliasFilter()) { return this; } else { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java index d5fb8f1b63e7e..0ef88153d85c5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java @@ -262,9 +262,9 @@ public void testRewritePipelineAggregationUnderAggregation() throws Exception { QueryRewriteContext context = new QueryRewriteContext(parserConfig(), null, () -> 0L); AggregatorFactories.Builder rewritten = builder.rewrite(context); CountDownLatch latch = new CountDownLatch(1); - context.executeAsyncActions(new ActionListener() { + context.executeAsyncActions(new ActionListener<>() { @Override - public void onResponse(Object response) { + public void onResponse(Void aVoid) { assertNotSame(builder, rewritten); Collection aggregatorFactories = rewritten.getAggregatorFactories(); assertEquals(1, aggregatorFactories.size()); @@ -289,9 +289,9 @@ public void testRewriteAggregationAtTopLevel() throws Exception { QueryRewriteContext context = new QueryRewriteContext(parserConfig(), null, () -> 0L); AggregatorFactories.Builder rewritten = builder.rewrite(context); CountDownLatch latch = new CountDownLatch(1); - context.executeAsyncActions(new ActionListener() { + context.executeAsyncActions(new ActionListener<>() { @Override - public void onResponse(Object response) { + public void onResponse(Void aVoid) { assertNotSame(builder, rewritten); PipelineAggregationBuilder rewrittenPipeline = rewritten.getPipelineAggregatorFactories().iterator().next(); assertThat(((RewrittenPipelineAggregationBuilder) rewrittenPipeline).setOnRewrite.get(), equalTo("rewritten")); From e1b42090ec45b3c847d4a93c19f38eef8014dbb5 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 4 Sep 2024 20:03:23 +0200 Subject: [PATCH 047/115] Allow dimension fields to have multiple values in standard and logsdb index mode (#112345) Fixes 
https://github.com/elastic/elasticsearch/issues/112232 Fixes https://github.com/elastic/elasticsearch/issues/112239 --- docs/changelog/112345.yaml | 8 +++ .../org/elasticsearch/index/IndexMode.java | 4 +- .../index/mapper/DocumentDimensions.java | 30 +++-------- .../index/mapper/BooleanFieldMapperTests.java | 24 +++++++-- .../index/mapper/IpFieldMapperTests.java | 21 +++++++- .../index/mapper/KeywordFieldMapperTests.java | 24 +++++++-- .../flattened/FlattenedFieldMapperTests.java | 24 +++++++-- .../index/mapper/MapperServiceTestCase.java | 8 +++ .../mapper/WholeNumberFieldMapperTests.java | 21 +++++++- .../UnsignedLongFieldMapperTests.java | 20 ++++++- .../rest-api-spec/test/20_logs.tests.yml | 22 -------- .../rest-api-spec/test/20_logs_tests.yml | 53 +++++++++++++++++++ 12 files changed, 194 insertions(+), 65 deletions(-) create mode 100644 docs/changelog/112345.yaml delete mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml diff --git a/docs/changelog/112345.yaml b/docs/changelog/112345.yaml new file mode 100644 index 0000000000000..b922fe3754cbb --- /dev/null +++ b/docs/changelog/112345.yaml @@ -0,0 +1,8 @@ +pr: 112345 +summary: Allow dimension fields to have multiple values in standard and logsdb index + mode +area: Mapping +type: enhancement +issues: + - 112232 + - 112239 diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 96598ba38a3fe..8745b46fb5458 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -107,7 +107,7 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { @Override public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return new 
DocumentDimensions.OnlySingleValueAllowed(); + return DocumentDimensions.Noop.INSTANCE; } @Override @@ -281,7 +281,7 @@ public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { @Override public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return new DocumentDimensions.OnlySingleValueAllowed(); + return DocumentDimensions.Noop.INSTANCE; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java index aa69e4db50e76..f4995de2b080e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java @@ -12,8 +12,6 @@ import org.elasticsearch.index.IndexSettings; import java.net.InetAddress; -import java.util.HashSet; -import java.util.Set; /** * Collects dimensions from documents. @@ -49,59 +47,45 @@ default DocumentDimensions addString(String fieldName, String value) { DocumentDimensions validate(IndexSettings settings); /** - * Makes sure that each dimension only appears on time. 
+ * Noop implementation that doesn't perform validations on dimension fields */ - class OnlySingleValueAllowed implements DocumentDimensions { - private final Set names = new HashSet<>(); + enum Noop implements DocumentDimensions { + + INSTANCE; @Override - public DocumentDimensions addString(String fieldName, BytesRef value) { - add(fieldName); + public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { return this; } - // Override to skip the UTF-8 conversion that happens in the default implementation @Override public DocumentDimensions addString(String fieldName, String value) { - add(fieldName); return this; } @Override public DocumentDimensions addIp(String fieldName, InetAddress value) { - add(fieldName); return this; } @Override public DocumentDimensions addLong(String fieldName, long value) { - add(fieldName); return this; } @Override public DocumentDimensions addUnsignedLong(String fieldName, long value) { - add(fieldName); return this; } @Override public DocumentDimensions addBoolean(String fieldName, boolean value) { - add(fieldName); return this; } @Override - public DocumentDimensions validate(final IndexSettings settings) { - // DO NOTHING + public DocumentDimensions validate(IndexSettings settings) { return this; } - - private void add(String fieldName) { - boolean isNew = names.add(fieldName); - if (false == isNew) { - throw new IllegalArgumentException("Dimension field [" + fieldName + "] cannot be a multi-valued field."); - } - } - }; + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index e08a443bd74cb..03f030a26992d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.script.BooleanFieldScript; @@ -25,11 +26,14 @@ import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; +import java.time.Instant; import java.util.List; import java.util.function.Function; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; public class BooleanFieldMapperTests extends MapperTestCase { @@ -257,17 +261,29 @@ public void testDimensionIndexedAndDocvalues() { } } - public void testDimensionMultiValuedField() throws IOException { - XContentBuilder mapping = fieldMapping(b -> { + public void testDimensionMultiValuedFieldTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimension", true); - }); - DocumentMapper mapper = randomBoolean() ? 
createDocumentMapper(mapping) : createTimeSeriesModeDocumentMapper(mapping); + }), IndexMode.TIME_SERIES); Exception e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> b.array("field", true, false)))); assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be a multi-valued field")); } + public void testDimensionMultiValuedFieldNonTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { + minimalMapping(b); + b.field("time_series_dimension", true); + }), randomFrom(IndexMode.STANDARD, IndexMode.LOGSDB)); + + ParsedDocument doc = mapper.parse(source(b -> { + b.array("field", true, false); + b.field("@timestamp", Instant.now()); + })); + assertThat(doc.docs().get(0).getFields("field"), hasSize(greaterThan(1))); + } + public void testDimensionInRoutingPath() throws IOException { MapperService mapper = createMapperService(fieldMapping(b -> b.field("type", "keyword").field("time_series_dimension", true))); IndexSettings settings = createIndexSettings( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index ba9c2e6c4a299..296871e258cd7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.script.IpFieldScript; @@ -26,6 +27,7 @@ import java.io.IOException; import java.net.InetAddress; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -35,6 +37,8 @@ import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; public class IpFieldMapperTests extends MapperTestCase { @@ -255,11 +259,11 @@ public void testDimensionIndexedAndDocvalues() { } } - public void testDimensionMultiValuedField() throws IOException { + public void testDimensionMultiValuedFieldTSDB() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimension", true); - })); + }), IndexMode.TIME_SERIES); Exception e = expectThrows( DocumentParsingException.class, @@ -268,6 +272,19 @@ public void testDimensionMultiValuedField() throws IOException { assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be a multi-valued field")); } + public void testDimensionMultiValuedFieldNonTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { + minimalMapping(b); + b.field("time_series_dimension", true); + }), randomFrom(IndexMode.STANDARD, IndexMode.LOGSDB)); + + ParsedDocument doc = mapper.parse(source(b -> { + b.array("field", "192.168.1.1", "192.168.1.1"); + b.field("@timestamp", Instant.now()); + })); + assertThat(doc.docs().get(0).getFields("field"), hasSize(greaterThan(1))); + } + @Override protected String generateRandomInputValue(MappedFieldType ft) { return NetworkAddress.format(randomIp(randomBoolean())); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 833b0a60827d0..d66575bc41cda 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; @@ -44,6 +45,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.time.Instant; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -57,6 +59,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; public class KeywordFieldMapperTests extends MapperTestCase { @@ -373,17 +377,29 @@ public void testDimensionIndexedAndDocvalues() { } } - public void testDimensionMultiValuedField() throws IOException { - XContentBuilder mapping = fieldMapping(b -> { + public void testDimensionMultiValuedFieldTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimension", true); - }); - DocumentMapper mapper = randomBoolean() ? 
createDocumentMapper(mapping) : createTimeSeriesModeDocumentMapper(mapping); + }), IndexMode.TIME_SERIES); Exception e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> b.array("field", "1234", "45678")))); assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be a multi-valued field")); } + public void testDimensionMultiValuedFieldNonTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { + minimalMapping(b); + b.field("time_series_dimension", true); + }), randomFrom(IndexMode.STANDARD, IndexMode.LOGSDB)); + + ParsedDocument doc = mapper.parse(source(b -> { + b.array("field", "1234", "45678"); + b.field("@timestamp", Instant.now()); + })); + assertThat(doc.docs().get(0).getFields("field"), hasSize(greaterThan(1))); + } + public void testDimensionExtraLongKeyword() throws IOException { DocumentMapper mapper = createTimeSeriesModeDocumentMapper(fieldMapping(b -> { minimalMapping(b); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java index aba20ec5d81c8..7b7044f528c89 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DocumentMapper; @@ -34,6 +35,7 @@ import org.junit.AssumptionViolatedException; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ 
-46,6 +48,8 @@ import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; public class FlattenedFieldMapperTests extends MapperTestCase { @@ -189,12 +193,11 @@ public void testDimensionIndexedAndDocvalues() { } } - public void testDimensionMultiValuedField() throws IOException { - XContentBuilder mapping = fieldMapping(b -> { + public void testDimensionMultiValuedFieldTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimensions", List.of("key1", "key2", "field3.key3")); - }); - DocumentMapper mapper = randomBoolean() ? createDocumentMapper(mapping) : createTimeSeriesModeDocumentMapper(mapping); + }), IndexMode.TIME_SERIES); Exception e = expectThrows( DocumentParsingException.class, @@ -203,6 +206,19 @@ public void testDimensionMultiValuedField() throws IOException { assertThat(e.getCause().getMessage(), containsString("Dimension field [field.key1] cannot be a multi-valued field")); } + public void testDimensionMultiValuedFieldNonTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { + minimalMapping(b); + b.field("time_series_dimensions", List.of("key1", "key2", "field3.key3")); + }), randomFrom(IndexMode.STANDARD, IndexMode.LOGSDB)); + + ParsedDocument doc = mapper.parse(source(b -> { + b.array("field.key1", "value1", "value2"); + b.field("@timestamp", Instant.now()); + })); + assertThat(doc.docs().get(0).getFields("field"), hasSize(greaterThan(1))); + } + public void testDisableIndex() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 7c11e7446e5c5..235bb7208fb08 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -139,6 +139,14 @@ protected static String randomIndexOptions() { return randomFrom("docs", "freqs", "positions", "offsets"); } + protected final DocumentMapper createDocumentMapper(XContentBuilder mappings, IndexMode indexMode) throws IOException { + return switch (indexMode) { + case STANDARD -> createDocumentMapper(mappings); + case TIME_SERIES -> createTimeSeriesModeDocumentMapper(mappings); + case LOGSDB -> createLogsModeDocumentMapper(mappings); + }; + } + protected final DocumentMapper createDocumentMapper(XContentBuilder mappings) throws IOException { return createMapperService(mappings).documentMapper(); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/WholeNumberFieldMapperTests.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/WholeNumberFieldMapperTests.java index 99c500639bdde..4b12266196dee 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/WholeNumberFieldMapperTests.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/WholeNumberFieldMapperTests.java @@ -9,11 +9,15 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.index.IndexMode; import java.io.IOException; +import java.time.Instant; import java.util.List; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; public abstract class WholeNumberFieldMapperTests extends NumberFieldMapperTests { @@ -69,11 +73,11 @@ public void testDimensionIndexedAndDocvalues() { } } - public void testDimensionMultiValuedField() throws IOException { + public void 
testDimensionMultiValuedFieldTSDB() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimension", true); - })); + }), IndexMode.TIME_SERIES); Exception e = expectThrows( DocumentParsingException.class, @@ -82,6 +86,19 @@ public void testDimensionMultiValuedField() throws IOException { assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be a multi-valued field")); } + public void testDimensionMultiValuedFieldNonTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { + minimalMapping(b); + b.field("time_series_dimension", true); + }), randomFrom(IndexMode.STANDARD, IndexMode.LOGSDB)); + + ParsedDocument doc = mapper.parse(source(b -> { + b.array("field", randomNumber(), randomNumber(), randomNumber()); + b.field("@timestamp", Instant.now()); + })); + assertThat(doc.docs().get(0).getFields("field"), hasSize(greaterThan(1))); + } + public void testMetricAndDimension() { Exception e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(fieldMapping(b -> { minimalMapping(b); diff --git a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java index 753440cb0b789..46969d8dbb2ed 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java +++ b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.math.BigInteger; +import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -38,6 +39,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; 
import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.matchesPattern; @@ -260,11 +263,11 @@ public void testDimensionIndexedAndDocvalues() { } } - public void testDimensionMultiValuedField() throws IOException { + public void testDimensionMultiValuedFieldTSDB() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimension", true); - })); + }), IndexMode.TIME_SERIES); Exception e = expectThrows( DocumentParsingException.class, @@ -273,6 +276,19 @@ public void testDimensionMultiValuedField() throws IOException { assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be a multi-valued field")); } + public void testDimensionMultiValuedFieldNonTSDB() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { + minimalMapping(b); + b.field("time_series_dimension", true); + }), randomFrom(IndexMode.STANDARD, IndexMode.LOGSDB)); + + ParsedDocument doc = mapper.parse(source(b -> { + b.array("field", randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + b.field("@timestamp", Instant.now()); + })); + assertThat(doc.docs().get(0).getFields("field"), hasSize(greaterThan(1))); + } + public void testMetricType() throws IOException { // Test default setting MapperService mapperService = createMapperService(fieldMapping(b -> minimalMapping(b))); diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml deleted file mode 100644 index d87c2a80deab8..0000000000000 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -setup: - - do: - cluster.health: - 
wait_for_events: languid ---- -"Default data_stream.type must be logs": - - do: - bulk: - index: logs-generic.otel-default - refresh: true - body: - - create: {} - - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default"}, "attributes": { "foo": "bar"}, "body_text":"Error: Unable to connect to the database.","severity_text":"ERROR","severity_number":3,"trace_id":"abc123xyz456def789ghi012jkl345"}' - - is_false: errors - - do: - search: - index: logs-generic.otel-default - body: - fields: ["data_stream.type"] - - length: { hits.hits: 1 } - - match: { hits.hits.0.fields.data_stream\.type: ["logs"] } diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml new file mode 100644 index 0000000000000..b0cf92b87667c --- /dev/null +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml @@ -0,0 +1,53 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid +--- +"Default data_stream.type must be logs": + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default"}, "attributes": { "foo": "bar"}, "body_text":"Error: Unable to connect to the database.","severity_text":"ERROR","severity_number":3,"trace_id":"abc123xyz456def789ghi012jkl345"}' + - is_false: errors + - do: + search: + index: logs-generic.otel-default + body: + fields: ["data_stream.type"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.data_stream\.type: ["logs"] } +--- +"Multi value fields": + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: {} + - "@timestamp": 2024-07-18T14:48:33.467654000Z + data_stream: + type: logs + dataset: generic.otel + namespace: default + resource: + attributes: + 
host.ip: ["127.0.0.1", "0.0.0.0"] + attributes: + foo: [3, 2, 1] + bar: [b, c, a] + body_text: "Error: Unable to connect to the database." + severity_text: ERROR + - is_false: errors + - do: + search: + index: logs-generic.otel-default + body: + fields: ["*"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.resource\.attributes\.host\.ip: ["0.0.0.0", "127.0.0.1"] } + - match: { hits.hits.0.fields.attributes\.foo: [1, 2, 3] } + - match: { hits.hits.0.fields.attributes\.bar: [a, b, c] } From 3d389ce3ddeb4fc1c636290874f1bb341ec5cc89 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Sep 2024 20:21:56 +0200 Subject: [PATCH 048/115] Some speedups aroud ListSetting (#112447) Just a random cleanup from debugging test failures. There's a couple of easy to fix slow things in list setting handling here. The biggest win is from not materializing the string list for non-string type setting values during parsing and speeding up `Strings.toString`. --- .../cluster/metadata/IndexMetadata.java | 2 +- .../org/elasticsearch/common/Strings.java | 12 ++++++- .../common/settings/Setting.java | 32 ++++++++++++------- .../http/HttpTransportSettings.java | 8 ++--- .../core/security/user/AnonymousUser.java | 3 +- .../core/watcher/crypto/CryptoService.java | 5 +-- .../audit/logfile/LoggingAuditTrail.java | 10 +++--- 7 files changed, 47 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 742439c9a2484..611640f4a3b0f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -456,7 +456,7 @@ public Iterator> settings() { ); public static final Setting.AffixSetting> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING = Setting.prefixKeySetting( "index.routing.allocation.initial_recovery.", - key -> 
Setting.stringListSetting(key) + Setting::stringListSetting ); /** diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java index 7f08f5e4ed800..553c2d1d3fb20 100644 --- a/server/src/main/java/org/elasticsearch/common/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/Strings.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStream; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.Nullable; @@ -19,7 +20,10 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -767,7 +771,13 @@ public static String toString(ToXContent toXContent, ToXContent.Params params) { * @param xContentBuilder builder containing an object to converted to a string */ public static String toString(XContentBuilder xContentBuilder) { - return BytesReference.bytes(xContentBuilder).utf8ToString(); + xContentBuilder.close(); + OutputStream stream = xContentBuilder.getOutputStream(); + if (stream instanceof ByteArrayOutputStream baos) { + return baos.toString(StandardCharsets.UTF_8); + } else { + return ((BytesStream) stream).bytes().utf8ToString(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index ad9f933ec0459..2557cacf2c131 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ 
b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -1696,7 +1696,7 @@ public static Setting> stringListSetting(String key, Property... pr } public static Setting> stringListSetting(String key, List defValue, Property... properties) { - return new ListSetting<>(key, null, s -> defValue, Setting::parseableStringToList, v -> {}, properties) { + return new ListSetting<>(key, null, s -> defValue, s -> parseableStringToList(s, Function.identity()), v -> {}, properties) { @Override public List get(Settings settings) { checkDeprecation(settings); @@ -1735,7 +1735,13 @@ public static Setting> listSetting( final Function singleValueParser, final Property... properties ) { - return listSetting(key, fallbackSetting, singleValueParser, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), properties); + return listSetting( + key, + fallbackSetting, + singleValueParser, + s -> parseableStringToList(fallbackSetting.getRaw(s), Function.identity()), + properties + ); } public static Setting> listSetting( @@ -1759,12 +1765,17 @@ public static Setting> listSetting( if (defaultStringValue.apply(Settings.EMPTY) == null) { throw new IllegalArgumentException("default value function must not return null"); } - Function> parser = (s) -> parseableStringToList(s).stream().map(singleValueParser).toList(); - - return new ListSetting<>(key, fallbackSetting, defaultStringValue, parser, validator, properties); + return new ListSetting<>( + key, + fallbackSetting, + defaultStringValue, + s -> parseableStringToList(s, singleValueParser), + validator, + properties + ); } - private static List parseableStringToList(String parsableString) { + private static List parseableStringToList(String parsableString, Function singleValueParser) { if ("[]".equals(parsableString)) { return List.of(); } @@ -1773,7 +1784,7 @@ private static List parseableStringToList(String parsableString) { xContentParser.nextToken(); return XContentParserUtils.parseList(xContentParser, p -> { 
XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, p.currentToken(), p); - return p.text(); + return singleValueParser.apply(p.text()); }); } catch (IOException e) { throw new IllegalArgumentException("failed to parse array", e); @@ -2079,7 +2090,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(key); + return key.hashCode(); } /** @@ -2088,8 +2099,7 @@ public int hashCode() { * {@link #getConcreteSetting(String)} is used to pull the updater. */ public static AffixSetting prefixKeySetting(String prefix, Function> delegateFactory) { - BiFunction> delegateFactoryWithNamespace = (ns, k) -> delegateFactory.apply(k); - return affixKeySetting(new AffixKey(prefix, null, null), delegateFactoryWithNamespace); + return affixKeySetting(prefix, null, delegateFactory); } /** @@ -2177,7 +2187,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(key); + return key.hashCode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index dcceb43b63db8..912af74e6df79 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -30,26 +30,26 @@ public final class HttpTransportSettings { public static final Setting SETTING_CORS_ALLOW_ORIGIN = new Setting<>( "http.cors.allow-origin", "", - (value) -> value, + Function.identity(), Property.NodeScope ); public static final Setting SETTING_CORS_MAX_AGE = intSetting("http.cors.max-age", 1728000, Property.NodeScope); public static final Setting SETTING_CORS_ALLOW_METHODS = new Setting<>( "http.cors.allow-methods", "OPTIONS,HEAD,GET,POST,PUT,DELETE", - (value) -> value, + Function.identity(), Property.NodeScope ); public static final Setting SETTING_CORS_ALLOW_HEADERS = new Setting<>( "http.cors.allow-headers", 
"X-Requested-With,Content-Type,Content-Length,Authorization,Accept,User-Agent,X-Elastic-Client-Meta", - (value) -> value, + Function.identity(), Property.NodeScope ); public static final Setting SETTING_CORS_EXPOSE_HEADERS = new Setting<>( "http.cors.expose-headers", "X-elastic-product", - (value) -> value, + Function.identity(), Property.NodeScope ); public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java index 9c1281a616ff6..03614e44f32e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java @@ -13,6 +13,7 @@ import java.util.List; import java.util.Optional; +import java.util.function.Function; import static org.elasticsearch.xpack.core.security.SecurityField.setting; @@ -25,7 +26,7 @@ public class AnonymousUser extends ReservedUser { public static final Setting USERNAME_SETTING = new Setting<>( setting("authc.anonymous.username"), DEFAULT_ANONYMOUS_USERNAME, - s -> s, + Function.identity(), Property.NodeScope ); public static final Setting> ROLES_SETTING = Setting.stringListSetting( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java index 49e00588746fd..84ea1f5d32923 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -25,6 +25,7 @@ import java.util.Arrays; import java.util.Base64; import java.util.List; +import java.util.function.Function; import 
javax.crypto.BadPaddingException; import javax.crypto.Cipher; @@ -57,7 +58,7 @@ public class CryptoService { private static final Setting ENCRYPTION_ALGO_SETTING = new Setting<>( SecurityField.setting("encryption.algorithm"), s -> DEFAULT_ENCRYPTION_ALGORITHM, - s -> s, + Function.identity(), Property.NodeScope ); private static final Setting ENCRYPTION_KEY_LENGTH_SETTING = Setting.intSetting( @@ -68,7 +69,7 @@ public class CryptoService { private static final Setting ENCRYPTION_KEY_ALGO_SETTING = new Setting<>( SecurityField.setting("encryption_key.algorithm"), DEFAULT_KEY_ALGORITH, - s -> s, + Function.identity(), Property.NodeScope ); private static final Logger logger = LogManager.getLogger(CryptoService.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index bc5cc4a5e6b3f..a8d2f44873194 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -327,7 +327,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { key, Collections.singletonList("*"), Function.identity(), - value -> EventFilterPolicy.parsePredicate(value), + EventFilterPolicy::parsePredicate, Property.NodeScope, Property.Dynamic ) @@ -339,7 +339,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { key, Collections.singletonList("*"), Function.identity(), - value -> EventFilterPolicy.parsePredicate(value), + EventFilterPolicy::parsePredicate, Property.NodeScope, Property.Dynamic ) @@ -351,7 +351,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { key, Collections.singletonList("*"), Function.identity(), - value -> EventFilterPolicy.parsePredicate(value), + 
EventFilterPolicy::parsePredicate, Property.NodeScope, Property.Dynamic ) @@ -363,7 +363,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { key, Collections.singletonList("*"), Function.identity(), - value -> EventFilterPolicy.parsePredicate(value), + EventFilterPolicy::parsePredicate, Property.NodeScope, Property.Dynamic ) @@ -375,7 +375,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { key, Collections.singletonList("*"), Function.identity(), - value -> EventFilterPolicy.parsePredicate(value), + EventFilterPolicy::parsePredicate, Property.NodeScope, Property.Dynamic ) From 2a9e47458bb394cd9d08c5a3d8b08156f23b4f66 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Wed, 4 Sep 2024 11:33:06 -0700 Subject: [PATCH 049/115] Rework fix for stale data in synthetic source to improve performance (#112480) --- .../BinaryDocValuesSyntheticFieldLoader.java | 9 +--- .../mapper/CompositeSyntheticFieldLoader.java | 51 ++++++++++++------- .../index/mapper/DocCountFieldMapper.java | 9 +--- .../mapper/IgnoreMalformedStoredValues.java | 25 +++++---- .../index/mapper/NestedObjectMapper.java | 8 +-- .../index/mapper/ObjectMapper.java | 49 +++++++++++------- ...dNumericDocValuesSyntheticFieldLoader.java | 5 ++ ...SetDocValuesSyntheticFieldLoaderLayer.java | 9 +--- .../index/mapper/SourceLoader.java | 38 ++++++++++---- .../mapper/StringStoredFieldFieldLoader.java | 18 +++---- ...ortedSetDocValuesSyntheticFieldLoader.java | 9 +--- .../vectors/DenseVectorFieldMapper.java | 14 +---- .../CompositeSyntheticFieldLoaderTests.java | 24 ++++++--- .../mapper/HistogramFieldMapper.java | 8 +-- .../AggregateDoubleMetricFieldMapper.java | 8 +-- .../mapper/ConstantKeywordFieldMapper.java | 5 ++ .../wildcard/mapper/WildcardFieldMapper.java | 8 +-- 17 files changed, 149 insertions(+), 148 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java 
b/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java index c3eb0c4c0290a..5c0f3725b7951 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java @@ -14,10 +14,8 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Map; -import java.util.stream.Stream; -public abstract class BinaryDocValuesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { +public abstract class BinaryDocValuesSyntheticFieldLoader extends SourceLoader.DocValuesBasedSyntheticFieldLoader { private final String name; private BinaryDocValues values; private boolean hasValue; @@ -28,11 +26,6 @@ protected BinaryDocValuesSyntheticFieldLoader(String name) { protected abstract void writeValue(XContentBuilder b, BytesRef value) throws IOException; - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { values = leafReader.getBinaryDocValues(name); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java index 7bb1f99e81705..69f5750642697 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java @@ -50,12 +50,6 @@ public CompositeSyntheticFieldLoader(String leafFieldName, String fullFieldName, @Override public Stream> storedFieldLoaders() { return parts.stream().flatMap(Layer::storedFieldLoaders).map(e -> Map.entry(e.getKey(), new StoredFieldLoader() { - @Override - public void advanceToDoc(int docId) { - storedFieldLoadersHaveValues = false; - 
e.getValue().advanceToDoc(docId); - } - @Override public void load(List newValues) { storedFieldLoadersHaveValues = true; @@ -79,8 +73,6 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf } return docId -> { - this.docValuesLoadersHaveValues = false; - boolean hasDocs = false; for (var loader : loaders) { hasDocs |= loader.advanceToDoc(docId); @@ -117,6 +109,18 @@ public void write(XContentBuilder b) throws IOException { part.write(b); } b.endArray(); + softReset(); + } + + private void softReset() { + storedFieldLoadersHaveValues = false; + docValuesLoadersHaveValues = false; + } + + @Override + public void reset() { + softReset(); + parts.forEach(SourceLoader.SyntheticFieldLoader::reset); } @Override @@ -139,6 +143,19 @@ public interface Layer extends SourceLoader.SyntheticFieldLoader { long valueCount(); } + public interface DocValuesLayer extends Layer { + @Override + default Stream> storedFieldLoaders() { + return Stream.empty(); + } + + @Override + default void reset() { + // Not applicable to loaders using only doc values + // since DocValuesLoader#advanceToDoc will reset the state anyway. + } + } + /** * Layer that loads malformed values stored in a dedicated field with a conventional name. 
* @see IgnoreMalformedStoredValues @@ -177,17 +194,7 @@ public long valueCount() { @Override public Stream> storedFieldLoaders() { - return Stream.of(Map.entry(fieldName, new SourceLoader.SyntheticFieldLoader.StoredFieldLoader() { - @Override - public void advanceToDoc(int docId) { - values = emptyList(); - } - - @Override - public void load(List newValues) { - values = newValues; - } - })); + return Stream.of(Map.entry(fieldName, newValues -> values = newValues)); } @Override @@ -205,6 +212,12 @@ public void write(XContentBuilder b) throws IOException { for (Object v : values) { writeValue(v, b); } + reset(); + } + + @Override + public void reset() { + values = emptyList(); } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java index a7283cf0a28ec..6326e23c59b92 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java @@ -20,8 +20,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.Map; -import java.util.stream.Stream; /** Mapper for the doc_count field. 
*/ public class DocCountFieldMapper extends MetadataFieldMapper { @@ -139,15 +137,10 @@ public static PostingsEnum leafLookup(LeafReader reader) throws IOException { return reader.postings(TERM); } - private static class SyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private static class SyntheticFieldLoader extends SourceLoader.DocValuesBasedSyntheticFieldLoader { private PostingsEnum postings; private boolean hasValue; - @Override - public Stream> storedFieldLoaders() { - return Stream.empty(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { postings = leafLookup(leafReader); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java index 9265fb31a48d2..b3ff60a1d6557 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java @@ -79,6 +79,11 @@ public static IgnoreMalformedStoredValues stored(String fieldName) { */ public abstract void write(XContentBuilder b) throws IOException; + /** + * Remove stored values for this document and return to clean state to process next document. 
+ */ + public abstract void reset(); + private static final Empty EMPTY = new Empty(); private static class Empty extends IgnoreMalformedStoredValues { @@ -94,6 +99,9 @@ public int count() { @Override public void write(XContentBuilder b) throws IOException {} + + @Override + public void reset() {} } private static class Stored extends IgnoreMalformedStoredValues { @@ -107,17 +115,7 @@ private static class Stored extends IgnoreMalformedStoredValues { @Override public Stream> storedFieldLoaders() { - return Stream.of(Map.entry(name(fieldName), new SourceLoader.SyntheticFieldLoader.StoredFieldLoader() { - @Override - public void advanceToDoc(int docId) { - values = emptyList(); - } - - @Override - public void load(List newValues) { - values = newValues; - } - })); + return Stream.of(Map.entry(name(fieldName), newValues -> values = newValues)); } @Override @@ -134,6 +132,11 @@ public void write(XContentBuilder b) throws IOException { b.value(v); } } + reset(); + } + + @Override + public void reset() { values = emptyList(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index f61f91250516a..b37d886c91759 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -31,7 +31,6 @@ import java.util.Optional; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Stream; import static org.elasticsearch.index.mapper.SourceFieldMetrics.NOOP; @@ -393,7 +392,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { ); } - private class NestedSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private class NestedSyntheticFieldLoader extends SourceLoader.DocValuesBasedSyntheticFieldLoader { private final org.elasticsearch.index.fieldvisitor.StoredFieldLoader 
storedFieldLoader; private final SourceLoader sourceLoader; private final Supplier parentBitSetProducer; @@ -415,11 +414,6 @@ private NestedSyntheticFieldLoader( this.childFilter = childFilter; } - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { this.children.clear(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 163f76ef47034..281eccef7cef5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -841,18 +841,9 @@ private SyntheticSourceFieldLoader(List field public Stream> storedFieldLoaders() { return fields.stream() .flatMap(SourceLoader.SyntheticFieldLoader::storedFieldLoaders) - .map(e -> Map.entry(e.getKey(), new StoredFieldLoader() { - @Override - public void advanceToDoc(int docId) { - storedFieldLoadersHaveValues = false; - e.getValue().advanceToDoc(docId); - } - - @Override - public void load(List newValues) { - storedFieldLoadersHaveValues = true; - e.getValue().load(newValues); - } + .map(e -> Map.entry(e.getKey(), newValues -> { + storedFieldLoadersHaveValues = true; + e.getValue().load(newValues); })); } @@ -880,8 +871,6 @@ private ObjectDocValuesLoader(List loaders) { @Override public boolean advanceToDoc(int docId) throws IOException { - docValuesLoadersHaveValues = false; - boolean anyLeafHasDocValues = false; for (DocValuesLoader docValueLoader : loaders) { boolean leafHasValue = docValueLoader.advanceToDoc(docId); @@ -930,8 +919,14 @@ public void write(XContentBuilder b) throws IOException { } for (SourceLoader.SyntheticFieldLoader field : fields) { if (field.hasValue()) { - // Skip if the field source is stored separately, to avoid double-printing. 
- orderedFields.computeIfAbsent(field.fieldName(), k -> new FieldWriter.FieldLoader(field)); + if (orderedFields.containsKey(field.fieldName()) == false) { + orderedFields.put(field.fieldName(), new FieldWriter.FieldLoader(field)); + } else { + // Skip if the field source is stored separately, to avoid double-printing. + // Make sure to reset the state of loader so that values stored inside will not + // be used after this document is finished. + field.reset(); + } } } @@ -947,12 +942,30 @@ public void write(XContentBuilder b) throws IOException { } } b.endObject(); + softReset(); } - @Override - public boolean setIgnoredValues(Map> objectsWithIgnoredFields) { + /** + * reset() is expensive since it will descend the hierarchy and reset the loader + * of every field. + * We perform a reset of a child field inside write() only when it is needed. + * We know that either write() or reset() was called for every field, + * so in the end of write() we can do this soft reset only. + */ + private void softReset() { + storedFieldLoadersHaveValues = false; + docValuesLoadersHaveValues = false; ignoredValuesPresent = false; + } + + @Override + public void reset() { + softReset(); + fields.forEach(SourceLoader.SyntheticFieldLoader::reset); + } + @Override + public boolean setIgnoredValues(Map> objectsWithIgnoredFields) { if (objectsWithIgnoredFields == null || objectsWithIgnoredFields.isEmpty()) { return false; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java index a2568ec91ebdf..ebb429af30f31 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java @@ -107,6 +107,11 @@ public void write(XContentBuilder b) throws IOException { } } + @Override + public 
void reset() { + ignoreMalformedValues.reset(); + } + private interface Values { int count(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoaderLayer.java b/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoaderLayer.java index 626e7ecb16f46..8d4570dbf8728 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoaderLayer.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoaderLayer.java @@ -19,13 +19,11 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Map; -import java.util.stream.Stream; /** * Load {@code _source} fields from {@link SortedSetDocValues}. */ -public abstract class SortedSetDocValuesSyntheticFieldLoaderLayer implements CompositeSyntheticFieldLoader.Layer { +public abstract class SortedSetDocValuesSyntheticFieldLoaderLayer implements CompositeSyntheticFieldLoader.DocValuesLayer { private static final Logger logger = LogManager.getLogger(SortedSetDocValuesSyntheticFieldLoaderLayer.class); private final String name; @@ -44,11 +42,6 @@ public String fieldName() { return name; } - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { SortedSetDocValues dv = DocValues.getSortedSet(reader, name); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java index 934e5aced644b..319fd7fc792ef 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java @@ -185,10 +185,6 @@ public Source source(LeafStoredFieldLoader storedFieldLoader, int docId) throws @Override public void write(LeafStoredFieldLoader storedFieldLoader, int docId, 
XContentBuilder b) throws IOException { - for (var fieldLevelStoredFieldLoader : storedFieldLoaders.values()) { - fieldLevelStoredFieldLoader.advanceToDoc(docId); - } - // Maps the names of existing objects to lists of ignored fields they contain. Map> objectsWithIgnoredFields = null; @@ -277,6 +273,11 @@ public boolean hasValue() { @Override public void write(XContentBuilder b) {} + @Override + public void reset() { + + } + @Override public String fieldName() { return ""; @@ -322,16 +323,18 @@ default boolean setIgnoredValues(Map> storedFieldLoaders() { + return Stream.empty(); + } + + @Override + public void reset() { + // Not applicable to loaders using only doc values + // since DocValuesLoader#advanceToDoc will reset the state anyway. + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java index 0f2d61374d202..a539bda6eaa41 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java @@ -31,17 +31,7 @@ public StringStoredFieldFieldLoader(String name, String simpleName) { @Override public final Stream> storedFieldLoaders() { - return Stream.of(Map.entry(name, new SourceLoader.SyntheticFieldLoader.StoredFieldLoader() { - @Override - public void advanceToDoc(int docId) { - values = emptyList(); - } - - @Override - public void load(List newValues) { - values = newValues; - } - })); + return Stream.of(Map.entry(name, newValues -> values = newValues)); } @Override @@ -65,6 +55,12 @@ public final void write(XContentBuilder b) throws IOException { } b.endArray(); } + reset(); + } + + @Override + public void reset() { + values = emptyList(); } protected abstract void write(XContentBuilder b, Object value) throws IOException; diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java index fd8bbe3b62422..87df86e33f3f1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java @@ -16,10 +16,8 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Map; -import java.util.stream.Stream; -public class FlattenedSortedSetDocValuesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { +public class FlattenedSortedSetDocValuesSyntheticFieldLoader extends SourceLoader.DocValuesBasedSyntheticFieldLoader { private DocValuesFieldValues docValues = NO_VALUES; private final String fieldFullPath; private final String keyedFieldFullPath; @@ -43,11 +41,6 @@ public String fieldName() { return fieldFullPath; } - @Override - public Stream> storedFieldLoaders() { - return Stream.empty(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { final SortedSetDocValues dv = DocValues.getSortedSet(reader, keyedFieldFullPath); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 6a231d15f7be8..d56fc68dbc1c5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -2173,7 +2173,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { return new DocValuesSyntheticFieldLoader(indexCreatedVersion); } - private class IndexedSyntheticFieldLoader 
implements SourceLoader.SyntheticFieldLoader { + private class IndexedSyntheticFieldLoader extends SourceLoader.DocValuesBasedSyntheticFieldLoader { private FloatVectorValues values; private ByteVectorValues byteVectorValues; private boolean hasValue; @@ -2188,11 +2188,6 @@ private IndexedSyntheticFieldLoader(IndexVersion indexCreatedVersion, VectorSimi this.vectorSimilarity = vectorSimilarity; } - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { values = leafReader.getFloatVectorValues(fullPath()); @@ -2254,7 +2249,7 @@ public String fieldName() { } } - private class DocValuesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private class DocValuesSyntheticFieldLoader extends SourceLoader.DocValuesBasedSyntheticFieldLoader { private BinaryDocValues values; private boolean hasValue; private final IndexVersion indexCreatedVersion; @@ -2263,11 +2258,6 @@ private DocValuesSyntheticFieldLoader(IndexVersion indexCreatedVersion) { this.indexCreatedVersion = indexCreatedVersion; } - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { values = leafReader.getBinaryDocValues(fullPath()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoaderTests.java index 8cbd1e52d3f44..31d6f2f3a6ef7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoaderTests.java @@ -39,9 +39,7 @@ protected void writeValue(Object value, XContentBuilder b) throws IOException { ); var storedFieldLoaders = 
sut.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - storedFieldLoaders.get("foo.one").advanceToDoc(0); storedFieldLoaders.get("foo.one").load(List.of(45L, 46L)); - storedFieldLoaders.get("foo.two").advanceToDoc(0); storedFieldLoaders.get("foo.two").load(List.of(1L)); var result = XContentBuilder.builder(XContentType.JSON.xContent()); @@ -53,7 +51,7 @@ protected void writeValue(Object value, XContentBuilder b) throws IOException { {"foo":[45,46,1]}""", Strings.toString(result)); } - public void testLoadStoredFieldAndAdvance() throws IOException { + public void testLoadStoredFieldAndReset() throws IOException { var sut = new CompositeSyntheticFieldLoader( "foo", "bar.baz.foo", @@ -66,7 +64,6 @@ protected void writeValue(Object value, XContentBuilder b) throws IOException { ); var storedFieldLoaders = sut.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - storedFieldLoaders.get("foo.one").advanceToDoc(0); storedFieldLoaders.get("foo.one").load(List.of(45L)); var result = XContentBuilder.builder(XContentType.JSON.xContent()); @@ -77,10 +74,9 @@ protected void writeValue(Object value, XContentBuilder b) throws IOException { assertEquals(""" {"foo":45}""", Strings.toString(result)); - storedFieldLoaders.get("foo.one").advanceToDoc(1); - var empty = XContentBuilder.builder(XContentType.JSON.xContent()); empty.startObject(); + // reset() should have been called after previous write sut.write(result); empty.endObject(); @@ -110,6 +106,11 @@ public void write(XContentBuilder b) throws IOException { b.value(46L); } + @Override + public void reset() { + + } + @Override public String fieldName() { return ""; @@ -140,6 +141,11 @@ public void write(XContentBuilder b) throws IOException { b.value(1L); } + @Override + public void reset() { + + } + @Override public String fieldName() { return ""; @@ -192,6 +198,11 @@ public void write(XContentBuilder b) throws IOException { b.value(1L); } + @Override + 
public void reset() { + + } + @Override public String fieldName() { return ""; @@ -205,7 +216,6 @@ public long valueCount() { ); var storedFieldLoaders = sut.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - storedFieldLoaders.get("foo.one").advanceToDoc(0); storedFieldLoaders.get("foo.one").load(List.of(45L, 46L)); sut.docValuesLoader(null, new int[0]).advanceToDoc(0); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java index e80279934a090..eb27f4b8cfc22 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java @@ -57,7 +57,6 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Map; -import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -528,15 +527,10 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { ); } - private class HistogramSyntheticFieldLoader implements CompositeSyntheticFieldLoader.Layer { + private class HistogramSyntheticFieldLoader implements CompositeSyntheticFieldLoader.DocValuesLayer { private final InternalHistogramValue value = new InternalHistogramValue(); private BytesRef binaryValue; - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public SourceLoader.SyntheticFieldLoader.DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java 
b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index a547e56b43949..6a481aa11fdcf 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -72,7 +72,6 @@ import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -725,7 +724,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { ); } - public static class AggregateMetricSyntheticFieldLoader implements CompositeSyntheticFieldLoader.Layer { + public static class AggregateMetricSyntheticFieldLoader implements CompositeSyntheticFieldLoader.DocValuesLayer { private final String name; private final EnumSet metrics; private final Map metricDocValues = new EnumMap<>(Metric.class); @@ -746,11 +745,6 @@ public long valueCount() { return hasValue() ? 
1 : 0; } - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { metricDocValues.clear(); diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index f2222e0970ae0..ed4fa99b29eae 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -378,6 +378,11 @@ public void write(XContentBuilder b) throws IOException { } } + @Override + public void reset() { + // NOOP + } + @Override public String fieldName() { return fullPath(); diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index a661322899a8d..82dc4c0715300 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -87,7 +87,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Stream; /** * A {@link FieldMapper} for indexing fields with ngrams for efficient wildcard matching @@ -1022,16 +1021,11 @@ protected void writeValue(Object value, XContentBuilder b) throws IOException { return new CompositeSyntheticFieldLoader(leafName(), fullPath(), loader); } - private class WildcardSyntheticFieldLoader implements CompositeSyntheticFieldLoader.Layer { + private class 
WildcardSyntheticFieldLoader implements CompositeSyntheticFieldLoader.DocValuesLayer { private final ByteArrayStreamInput docValuesStream = new ByteArrayStreamInput(); private int docValueCount; private BytesRef docValueBytes; - @Override - public Stream> storedFieldLoaders() { - return Stream.empty(); - } - @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { BinaryDocValues values = leafReader.getBinaryDocValues(fullPath()); From d389cff0c3cd03ab41b66457b90e166e3daaf90c Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 4 Sep 2024 20:44:11 +0200 Subject: [PATCH 050/115] Don't compute numDocs when size is set to 0 while creating query phase collector manager (#112488) A simple call to `IndexReader#numDocs` can make us go through all docs and execute scripts in case there are runtime fields involved in the search. This is necessary only when size is greater than 0, otherwise it can be avoided. This commit moves the call to the two specific if branches in place of computing numDocs before the conditional --- .../query/QueryPhaseCollectorManager.java | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index 22b5f3d8dcafd..f298c96a1019a 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -220,8 +220,6 @@ static CollectorManager createQueryPhaseCollectorMa ); final IndexReader reader = searchContext.searcher().getIndexReader(); final Query query = searchContext.rewrittenQuery(); - // top collectors don't like a size of 0 - final int totalNumDocs = Math.max(1, reader.numDocs()); if (searchContext.size() == 0) { return new EmptyHits( postFilterWeight, @@ -232,31 +230,10 
@@ static CollectorManager createQueryPhaseCollectorMa searchContext.sort(), searchContext.trackTotalHitsUpTo() ); - } else if (searchContext.scrollContext() != null) { - // we can disable the tracking of total hits after the initial scroll query - // since the total hits is preserved in the scroll context. - int trackTotalHitsUpTo = searchContext.scrollContext().totalHits != null - ? SearchContext.TRACK_TOTAL_HITS_DISABLED - : SearchContext.TRACK_TOTAL_HITS_ACCURATE; - // no matter what the value of from is - int numDocs = Math.min(searchContext.size(), totalNumDocs); - return forScroll( - postFilterWeight, - terminateAfterChecker, - aggsCollectorManager, - searchContext.minimumScore(), - searchContext.getProfilers() != null, - reader, - query, - searchContext.sort(), - numDocs, - searchContext.trackScores(), - trackTotalHitsUpTo, - hasFilterCollector, - searchContext.scrollContext(), - searchContext.numberOfShards() - ); - } else { + } + // top collectors don't like a size of 0 + final int totalNumDocs = Math.max(1, reader.numDocs()); + if (searchContext.scrollContext() == null) { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; if (rescore) { @@ -265,38 +242,61 @@ static CollectorManager createQueryPhaseCollectorMa numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); } } - if (searchContext.collapse() != null) { - boolean trackScores = searchContext.sort() == null || searchContext.trackScores(); - return forCollapsing( + if (searchContext.collapse() == null) { + return new WithHits( postFilterWeight, terminateAfterChecker, aggsCollectorManager, searchContext.minimumScore(), searchContext.getProfilers() != null, - searchContext.collapse(), + reader, + query, searchContext.sort(), + searchContext.searchAfter(), numDocs, - trackScores, - searchContext.searchAfter() + searchContext.trackScores(), + searchContext.trackTotalHitsUpTo(), + hasFilterCollector ); 
} else { - return new WithHits( + boolean trackScores = searchContext.sort() == null || searchContext.trackScores(); + return forCollapsing( postFilterWeight, terminateAfterChecker, aggsCollectorManager, searchContext.minimumScore(), searchContext.getProfilers() != null, - reader, - query, + searchContext.collapse(), searchContext.sort(), - searchContext.searchAfter(), numDocs, - searchContext.trackScores(), - searchContext.trackTotalHitsUpTo(), - hasFilterCollector + trackScores, + searchContext.searchAfter() ); } } + // we can disable the tracking of total hits after the initial scroll query + // since the total hits is preserved in the scroll context. + int trackTotalHitsUpTo = searchContext.scrollContext().totalHits != null + ? SearchContext.TRACK_TOTAL_HITS_DISABLED + : SearchContext.TRACK_TOTAL_HITS_ACCURATE; + // no matter what the value of from is + int numDocs = Math.min(searchContext.size(), totalNumDocs); + return forScroll( + postFilterWeight, + terminateAfterChecker, + aggsCollectorManager, + searchContext.minimumScore(), + searchContext.getProfilers() != null, + reader, + query, + searchContext.sort(), + numDocs, + searchContext.trackScores(), + trackTotalHitsUpTo, + hasFilterCollector, + searchContext.scrollContext(), + searchContext.numberOfShards() + ); } /** From dd88aa702314039ff19b218b88185ab54d85bc8c Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 4 Sep 2024 15:07:23 -0400 Subject: [PATCH 051/115] Switch serverless pr check to new pipeline --- .buildkite/pull-requests.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index de0212685a8a7..7d3db786aba7d 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -17,7 +17,7 @@ }, { "enabled": true, - "pipeline_slug": "elasticsearch-pull-request-check-serverless-submodule", + "pipeline_slug": "elasticsearch-serverless-es-pr-check", "allow_org_users": true, "allowed_repo_permissions": [ 
"admin", From 5ebba87d1f9174ec2d028190d5270e114049aa17 Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:11:30 -0400 Subject: [PATCH 052/115] unmute MlIndexAndAliasTests.testTrainedModelInference (#111313) see https://github.com/elastic/elasticsearch/issues/108993 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 8d445ab0d5c1d..4f6470c24e4aa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -2,9 +2,6 @@ tests: - class: "org.elasticsearch.upgrades.SearchStatesIT" issue: "https://github.com/elastic/elasticsearch/issues/108991" method: "testCanMatch" -- class: "org.elasticsearch.upgrades.MlTrainedModelsUpgradeIT" - issue: "https://github.com/elastic/elasticsearch/issues/108993" - method: "testTrainedModelInference" - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-async-query-api/line_17} issue: https://github.com/elastic/elasticsearch/issues/109260 From 9db75d7ad2307625ea26253365fba8de32629045 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 4 Sep 2024 15:20:00 -0400 Subject: [PATCH 053/115] Change check-es-serverless triggered pipeline --- .buildkite/check-es-serverless.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/check-es-serverless.yml b/.buildkite/check-es-serverless.yml index ba6b34aa767ff..d0a17d70653ff 100644 --- a/.buildkite/check-es-serverless.yml +++ b/.buildkite/check-es-serverless.yml @@ -1,5 +1,5 @@ steps: - - trigger: elasticsearch-serverless-validate-submodule + - trigger: elasticsearch-serverless-es-pr-check label: ":elasticsearch: Check elasticsearch changes against serverless" build: message: "Validate latest elasticsearch changes" From 640b57232e0621b9558560980bbe25c7f9a4560f Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 4 Sep 2024 15:20:50 -0400 Subject: [PATCH 054/115] 
Revert pipeline change --- .buildkite/pull-requests.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 7d3db786aba7d..de0212685a8a7 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -17,7 +17,7 @@ }, { "enabled": true, - "pipeline_slug": "elasticsearch-serverless-es-pr-check", + "pipeline_slug": "elasticsearch-pull-request-check-serverless-submodule", "allow_org_users": true, "allowed_repo_permissions": [ "admin", From 9a9bbafaae52827cb158edfc04ea7cee013e158a Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 4 Sep 2024 15:37:32 -0400 Subject: [PATCH 055/115] Update check-es-serverless.yml --- .buildkite/check-es-serverless.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/check-es-serverless.yml b/.buildkite/check-es-serverless.yml index d0a17d70653ff..ba6b34aa767ff 100644 --- a/.buildkite/check-es-serverless.yml +++ b/.buildkite/check-es-serverless.yml @@ -1,5 +1,5 @@ steps: - - trigger: elasticsearch-serverless-es-pr-check + - trigger: elasticsearch-serverless-validate-submodule label: ":elasticsearch: Check elasticsearch changes against serverless" build: message: "Validate latest elasticsearch changes" From 41f05348f826310e2538d5556b5da72e5a11bd3c Mon Sep 17 00:00:00 2001 From: wajihaparvez Date: Wed, 4 Sep 2024 16:07:45 -0400 Subject: [PATCH 056/115] [Docs] Update Monitoring docs with integration assets info (#112164) * [Docs] Update Monitoring docs with integration assets info * [Docs] Adding test commit to fix CLA error --- docs/reference/monitoring/production.asciidoc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/reference/monitoring/production.asciidoc b/docs/reference/monitoring/production.asciidoc index 381f67e254041..86ffa99fa7f59 100644 --- a/docs/reference/monitoring/production.asciidoc +++ b/docs/reference/monitoring/production.asciidoc @@ -73,7 +73,9 @@ credentials 
must be valid on both the {kib} server and the monitoring cluster. *** If you plan to use {agent}, create a user that has the `remote_monitoring_collector` -<>. +<> and that the +monitoring related {fleet-guide}/install-uninstall-integration-assets.html#install-integration-assets[integration assets have been installed] +on the remote monitoring cluster. *** If you plan to use {metricbeat}, create a user that has the `remote_monitoring_collector` built-in role and a From 513e3ea0284e0edaf0a2d9528d2dd913baf8da43 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 5 Sep 2024 06:35:42 +1000 Subject: [PATCH 057/115] Mute org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests testClientServiceMutualAuthentication #112529 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4f6470c24e4aa..58d3060c90ad8 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -167,6 +167,9 @@ tests: - class: org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests method: testLoopOneAtATime issue: https://github.com/elastic/elasticsearch/issues/112471 +- class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests + method: testClientServiceMutualAuthentication + issue: https://github.com/elastic/elasticsearch/issues/112529 # Examples: # From bb1070475dd225c85d435feeb66bfb33dc215c44 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 4 Sep 2024 16:34:35 -0700 Subject: [PATCH 058/115] Expose HexFormat in Painless (#112412) Java 17 introduced a utility for parsing and formatting bytes as hexadecimal strings. This commit exposes that class in Painless. 
--- docs/changelog/112412.yaml | 5 +++++ .../org/elasticsearch/painless/java.util.txt | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 docs/changelog/112412.yaml diff --git a/docs/changelog/112412.yaml b/docs/changelog/112412.yaml new file mode 100644 index 0000000000000..fda53ebd1ade0 --- /dev/null +++ b/docs/changelog/112412.yaml @@ -0,0 +1,5 @@ +pr: 112412 +summary: Expose `HexFormat` in Painless +area: Infra/Scripting +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt index 1e9e9f40985cb..045905c358cd2 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt @@ -684,6 +684,24 @@ class java.util.Hashtable { def clone() } +class java.util.HexFormat { + HexFormat of() + HexFormat ofDelimiter(String) + HexFormat withDelimiter(String) + HexFormat withPrefix(String) + HexFormat withSuffix(String) + HexFormat withUpperCase() + HexFormat withLowerCase() + String delimiter() + String prefix() + String suffix() + boolean isUpperCase() + String formatHex(byte[]) + String formatHex(byte[],int,int) + byte[] parseHex(CharSequence) + byte[] parseHex(CharSequence,int,int) +} + class java.util.IdentityHashMap { () (Map) From 6d161e3d63bedc28088246cff58ce8ffe269e112 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 5 Sep 2024 08:04:21 +0200 Subject: [PATCH 059/115] Lower the memory footprint when creating DelayedBucket (#112519) Trim list to size when creating delayed buckets. 
--- docs/changelog/112519.yaml | 5 +++++ .../aggregations/bucket/terms/AbstractInternalTerms.java | 9 ++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/112519.yaml diff --git a/docs/changelog/112519.yaml b/docs/changelog/112519.yaml new file mode 100644 index 0000000000000..aa8a942ef0f58 --- /dev/null +++ b/docs/changelog/112519.yaml @@ -0,0 +1,5 @@ +pr: 112519 +summary: Lower the memory footprint when creating `DelayedBucket` +area: Aggregations +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java index af1cabdc27389..5f08855048ae8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java @@ -171,13 +171,14 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { pq.add(new IteratorAndCurrent<>(buckets.iterator())); } // list of buckets coming from different shards that have the same key - List sameTermBuckets = new ArrayList<>(); + ArrayList sameTermBuckets = new ArrayList<>(); B lastBucket = null; while (pq.size() > 0) { final IteratorAndCurrent top = pq.top(); assert lastBucket == null || cmp.compare(top.current(), lastBucket) >= 0; if (lastBucket != null && cmp.compare(top.current(), lastBucket) != 0) { // the key changed so bundle up the last key's worth of buckets + sameTermBuckets.trimToSize(); sink.accept(new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); sameTermBuckets = new ArrayList<>(); } @@ -198,18 +199,20 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { } if (sameTermBuckets.isEmpty() == false) { + sameTermBuckets.trimToSize(); sink.accept(new 
DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); } } private void reduceLegacy(List> bucketsList, AggregationReduceContext reduceContext, Consumer> sink) { - final Map> bucketMap = new HashMap<>(); + final Map> bucketMap = new HashMap<>(); for (List buckets : bucketsList) { for (B bucket : buckets) { bucketMap.computeIfAbsent(bucket.getKey(), k -> new ArrayList<>()).add(bucket); } } - for (List sameTermBuckets : bucketMap.values()) { + for (ArrayList sameTermBuckets : bucketMap.values()) { + sameTermBuckets.trimToSize(); sink.accept(new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); } } From d59df8af3e591a248a25b849612e448972068f10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Thu, 5 Sep 2024 08:14:35 +0200 Subject: [PATCH 060/115] Async search: Add ID and "is running" http headers (#112431) Add the async execution ID and "is running" flag in the response as HTTP headers. This allows users to know the request status without having to parse the response body. It was also implemented in the `/_async_search/status/` endpoint for consistency. Continuation of https://github.com/elastic/elasticsearch/pull/111840, which implemented this same thing for ESQL. 
Fixes https://github.com/elastic/elasticsearch/issues/109576 --- docs/changelog/112431.yaml | 6 ++ .../plugin/async-search/qa/rest/build.gradle | 5 ++ .../qa/AsyncSearchHeadersIT.java | 59 +++++++++++++++++++ .../search/TransportGetAsyncSearchAction.java | 16 ++++- .../search/TransportGetAsyncStatusAction.java | 19 ++++-- .../TransportSubmitAsyncSearchAction.java | 26 ++++++-- 6 files changed, 118 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/112431.yaml create mode 100644 x-pack/plugin/async-search/qa/rest/src/javaRestTest/java/org/elasticsearch/qa/AsyncSearchHeadersIT.java diff --git a/docs/changelog/112431.yaml b/docs/changelog/112431.yaml new file mode 100644 index 0000000000000..b8c1197bdc7ef --- /dev/null +++ b/docs/changelog/112431.yaml @@ -0,0 +1,6 @@ +pr: 112431 +summary: "Async search: Add ID and \"is running\" http headers" +area: Search +type: feature +issues: + - 109576 diff --git a/x-pack/plugin/async-search/qa/rest/build.gradle b/x-pack/plugin/async-search/qa/rest/build.gradle index b41600407be5c..4fc557a5b6048 100644 --- a/x-pack/plugin/async-search/qa/rest/build.gradle +++ b/x-pack/plugin/async-search/qa/rest/build.gradle @@ -1,6 +1,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.base-internal-es-plugin' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' @@ -10,6 +11,10 @@ esplugin { classname 'org.elasticsearch.query.DeprecatedQueryPlugin' } +dependencies { + clusterPlugins project(xpackModule('async-search')) +} + restResources { restApi { include '_common', 'indices', 'index', 'async_search' diff --git a/x-pack/plugin/async-search/qa/rest/src/javaRestTest/java/org/elasticsearch/qa/AsyncSearchHeadersIT.java b/x-pack/plugin/async-search/qa/rest/src/javaRestTest/java/org/elasticsearch/qa/AsyncSearchHeadersIT.java new file mode 100644 index 
0000000000000..de2bfd4c98ec9 --- /dev/null +++ b/x-pack/plugin/async-search/qa/rest/src/javaRestTest/java/org/elasticsearch/qa/AsyncSearchHeadersIT.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.qa; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class AsyncSearchHeadersIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().plugin("x-pack-async-search").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Before + public void createIndex() throws IOException { + client().performRequest(new Request("PUT", "/test_index")); + } + + public void testAsyncHeaders() throws IOException { + Response submitResponse = client().performRequest(new Request("POST", "/test_index/_async_search?keep_on_completion=true")); + var asyncExecutionId = assertAsyncHeaders(submitResponse); + + Response statusResponse = client().performRequest(new Request("GET", "/_async_search/status/" + asyncExecutionId)); + assertAsyncHeaders(statusResponse); + + Response resultResponse = client().performRequest(new Request("GET", "/_async_search/" + asyncExecutionId)); + assertAsyncHeaders(resultResponse); + } + + private String assertAsyncHeaders(Response response) throws IOException { + var json = entityAsMap(response); + + var asyncExecutionId = (String) json.get("id"); + var isRunning = (boolean) json.get("is_running"); + + if 
(asyncExecutionId != null) { + assertThat(response.getHeader("X-ElasticSearch-Async-Id"), equalTo(asyncExecutionId)); + } + assertThat(response.getHeader("X-ElasticSearch-Async-Is-Running"), equalTo(isRunning ? "?1" : "?0")); + + return asyncExecutionId; + } +} diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncSearchAction.java index 38df763856242..84ca3ad1edf4f 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncSearchAction.java @@ -16,11 +16,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.async.AsyncExecutionId; import org.elasticsearch.xpack.core.async.AsyncResultsService; import org.elasticsearch.xpack.core.async.AsyncTaskIndexService; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; @@ -32,6 +34,7 @@ public class TransportGetAsyncSearchAction extends HandledTransportAction { private final AsyncResultsService resultsService; private final TransportService transportService; + private final ThreadContext threadContext; @Inject public TransportGetAsyncSearchAction( @@ -45,6 +48,7 @@ public TransportGetAsyncSearchAction( ) { super(GetAsyncSearchAction.NAME, transportService, actionFilters, GetAsyncResultRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); 
this.transportService = transportService; + this.threadContext = threadPool.getThreadContext(); this.resultsService = createResultsService(transportService, clusterService, registry, client, threadPool, bigArrays); } @@ -78,15 +82,23 @@ static AsyncResultsService createResultsSe @Override protected void doExecute(Task task, GetAsyncResultRequest request, ActionListener listener) { + ActionListener listenerWithHeaders = listener.map(response -> { + threadContext.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, response.isRunning() ? "?1" : "?0"); + if (response.getId() != null) { + threadContext.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, response.getId()); + } + return response; + }); + DiscoveryNode node = resultsService.getNode(request.getId()); if (node == null || resultsService.isLocalNode(node)) { - resultsService.retrieveResult(request, listener); + resultsService.retrieveResult(request, listenerWithHeaders); } else { transportService.sendRequest( node, GetAsyncSearchAction.NAME, request, - new ActionListenerResponseHandler<>(listener, AsyncSearchResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) + new ActionListenerResponseHandler<>(listenerWithHeaders, AsyncSearchResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) ); } } diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java index 99719fec5dc9e..da12506d0e2c7 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; +import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -40,6 +41,7 @@ public class TransportGetAsyncStatusAction extends HandledTransportAction { private final TransportService transportService; private final ClusterService clusterService; + private final ThreadContext threadContext; private final AsyncTaskIndexService store; @Inject @@ -55,6 +57,7 @@ public TransportGetAsyncStatusAction( super(GetAsyncStatusAction.NAME, transportService, actionFilters, GetAsyncStatusRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.transportService = transportService; this.clusterService = clusterService; + this.threadContext = threadPool.getThreadContext(); this.store = new AsyncTaskIndexService<>( XPackPlugin.ASYNC_RESULTS_INDEX, clusterService, @@ -73,6 +76,12 @@ protected void doExecute(Task task, GetAsyncStatusRequest request, ActionListene DiscoveryNode node = clusterService.state().nodes().get(searchId.getTaskId().getNodeId()); DiscoveryNode localNode = clusterService.state().getNodes().getLocalNode(); + ActionListener listenerWithHeaders = listener.map(response -> { + threadContext.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, response.isRunning() ? 
"?1" : "?0"); + threadContext.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, response.getId()); + return response; + }); + if (node == null || Objects.equals(node, localNode)) { if (request.getKeepAlive() != null && request.getKeepAlive().getMillis() > 0) { long expirationTime = System.currentTimeMillis() + request.getKeepAlive().getMillis(); @@ -87,17 +96,17 @@ protected void doExecute(Task task, GetAsyncStatusRequest request, ActionListene AsyncSearchTask.class, AsyncSearchTask::getStatusResponse, AsyncStatusResponse::getStatusFromStoredSearch, - listener + listenerWithHeaders ); }, exc -> { RestStatus status = ExceptionsHelper.status(ExceptionsHelper.unwrapCause(exc)); if (status != RestStatus.NOT_FOUND) { logger.error(() -> format("failed to update expiration time for async-search [%s]", searchId.getEncoded()), exc); - listener.onFailure(exc); + listenerWithHeaders.onFailure(exc); } else { // the async search document or its index is not found. // That can happen if an invalid/deleted search id is provided. 
- listener.onFailure(new ResourceNotFoundException(searchId.getEncoded())); + listenerWithHeaders.onFailure(new ResourceNotFoundException(searchId.getEncoded())); } })); } else { @@ -107,7 +116,7 @@ protected void doExecute(Task task, GetAsyncStatusRequest request, ActionListene AsyncSearchTask.class, AsyncSearchTask::getStatusResponse, AsyncStatusResponse::getStatusFromStoredSearch, - listener + listenerWithHeaders ); } } else { @@ -115,7 +124,7 @@ protected void doExecute(Task task, GetAsyncStatusRequest request, ActionListene node, GetAsyncStatusAction.NAME, request, - new ActionListenerResponseHandler<>(listener, AsyncStatusResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) + new ActionListenerResponseHandler<>(listenerWithHeaders, AsyncStatusResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) ); } } diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java index f254fd6171d31..ad648af2c5571 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java @@ -92,6 +92,14 @@ protected void doExecute(Task submitTask, SubmitAsyncSearchRequest request, Acti searchRequest ); searchAction.execute(searchTask, searchRequest, searchTask.getSearchProgressActionListener()); + + ActionListener submitListenerWithHeaders = submitListener.map(response -> { + threadContext.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, response.isRunning() ? 
"?1" : "?0"); + if (response.getId() != null) { + threadContext.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, response.getId()); + } + return response; + }); searchTask.addCompletionListener(new ActionListener<>() { @Override public void onResponse(AsyncSearchResponse searchResponse) { @@ -119,14 +127,14 @@ public void onResponse(DocWriteResponse r) { finalResponse -> onFinalResponse(searchTask, finalResponse, () -> {}) ); } finally { - submitListener.onResponse(searchResponse); + submitListenerWithHeaders.onResponse(searchResponse); } } else { searchResponse.mustIncRef(); onFinalResponse( searchTask, searchResponse, - () -> ActionListener.respondAndRelease(submitListener, searchResponse) + () -> ActionListener.respondAndRelease(submitListenerWithHeaders, searchResponse) ); } } @@ -138,7 +146,7 @@ public void onFailure(Exception exc) { exc, searchResponse.isRunning(), "fatal failure: unable to store initial response", - submitListener + submitListenerWithHeaders ); } }, searchResponse::decRef) @@ -147,14 +155,20 @@ public void onFailure(Exception exc) { initialResp.decRef(); } } catch (Exception exc) { - onFatalFailure(searchTask, exc, searchResponse.isRunning(), "fatal failure: generic error", submitListener); + onFatalFailure( + searchTask, + exc, + searchResponse.isRunning(), + "fatal failure: generic error", + submitListenerWithHeaders + ); } } else { try (searchTask) { // the task completed within the timeout so the response is sent back to the user // with a null id since nothing was stored on the cluster. 
taskManager.unregister(searchTask); - ActionListener.respondAndRelease(submitListener, searchResponse.clone(null)); + ActionListener.respondAndRelease(submitListenerWithHeaders, searchResponse.clone(null)); } } } @@ -163,7 +177,7 @@ public void onFailure(Exception exc) { public void onFailure(Exception exc) { // this will only ever be called if there is an issue scheduling the thread that executes // the completion listener once the wait for completion timeout expires. - onFatalFailure(searchTask, exc, true, "fatal failure: addCompletionListener", submitListener); + onFatalFailure(searchTask, exc, true, "fatal failure: addCompletionListener", submitListenerWithHeaders); } }, request.getWaitForCompletionTimeout()); } From e54f46e4eb144387de7d24afbe48f59f2064537c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 5 Sep 2024 10:46:41 +0200 Subject: [PATCH 061/115] [DOCS] Fixes indentation issue on PUT trained models docs page. (#112538) --- .../ml/trained-models/apis/put-trained-models.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index eef90630eb35b..e29bc8823ab29 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -588,7 +588,7 @@ Refer to <> to review the properties of the `tokenization` object. ===== -`text_similarity`:::: +`text_similarity`::: (Object, optional) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] + From 2f08d7d8be38e6b22624b192b3444730a2c62b54 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Thu, 5 Sep 2024 10:55:11 +0200 Subject: [PATCH 062/115] ESQL: Reorganize optimizer rules (#112338) - Organize the optimizer rules consistently for all 4 optimizers (logical, physical, local logical, local physical). 
- Move helper methods meant for optimizer rules out of the optimizers into the relevant rules or into helper classes. - Consolidate the 2 nearly identical logical ParameterizedRules into one. --- .../xpack/esql/analysis/Verifier.java | 2 +- .../optimizer/LocalLogicalPlanOptimizer.java | 303 +------ .../optimizer/LocalPhysicalPlanOptimizer.java | 758 +----------------- .../esql/optimizer/LogicalPlanOptimizer.java | 352 ++------ .../xpack/esql/optimizer/LogicalVerifier.java | 4 +- .../xpack/esql/optimizer/OptimizerRules.java | 53 -- .../optimizer/PhysicalOptimizerRules.java | 2 +- .../esql/optimizer/PhysicalPlanOptimizer.java | 78 +- .../esql/optimizer/PhysicalVerifier.java | 4 +- .../rules/PlanConsistencyChecker.java | 48 ++ .../rules/{ => logical}/AddDefaultTopN.java | 8 +- .../BooleanFunctionEqualsElimination.java | 2 +- .../{ => logical}/BooleanSimplification.java | 2 +- .../CombineBinaryComparisons.java | 2 +- .../{ => logical}/CombineDisjunctions.java | 2 +- .../rules/{ => logical}/CombineEvals.java | 2 +- .../{ => logical}/CombineProjections.java | 2 +- .../rules/{ => logical}/ConstantFolding.java | 2 +- .../{ => logical}/ConvertStringToByteRef.java | 2 +- .../DuplicateLimitAfterMvExpand.java | 2 +- .../rules/{ => logical}/FoldNull.java | 2 +- .../{ => logical}/LiteralsOnTheRight.java | 2 +- .../rules/{ => logical}/OptimizerRules.java | 23 +- .../{ => logical}/PartiallyFoldCase.java | 4 +- .../{ => logical}/PropagateEmptyRelation.java | 11 +- .../rules/{ => logical}/PropagateEquals.java | 2 +- .../{ => logical}/PropagateEvalFoldables.java | 2 +- .../{ => logical}/PropagateNullable.java | 2 +- .../rules/{ => logical}/PruneColumns.java | 2 +- .../rules/{ => logical}/PruneEmptyPlans.java | 11 +- .../rules/{ => logical}/PruneFilters.java | 5 +- .../{ => logical}/PruneLiteralsInOrderBy.java | 2 +- .../PruneOrderByBeforeStats.java | 2 +- .../PruneRedundantSortClauses.java | 2 +- .../PushDownAndCombineFilters.java | 5 +- .../PushDownAndCombineLimits.java | 2 +- 
.../PushDownAndCombineOrderBy.java | 5 +- .../rules/{ => logical}/PushDownEnrich.java | 5 +- .../rules/{ => logical}/PushDownEval.java | 5 +- .../{ => logical}/PushDownRegexExtract.java | 5 +- .../rules/logical/PushDownUtils.java | 210 +++++ .../{ => logical}/RemoveStatsOverride.java | 2 +- .../ReplaceAliasingEvalWithProject.java | 2 +- .../ReplaceLimitAndSortAsTopN.java | 2 +- .../{ => logical}/ReplaceLookupWithJoin.java | 2 +- .../ReplaceOrderByExpressionWithEval.java | 2 +- .../{ => logical}/ReplaceRegexMatch.java | 2 +- .../ReplaceStatsAggExpressionWithEval.java | 5 +- .../ReplaceStatsNestedExpressionWithEval.java | 5 +- .../ReplaceTrivialTypeConversions.java | 2 +- .../rules/{ => logical}/SetAsOptimized.java | 2 +- .../SimplifyComparisonsArithmetics.java | 2 +- .../SkipQueryOnEmptyMappings.java | 2 +- .../{ => logical}/SkipQueryOnLimitZero.java | 5 +- .../SplitInWithFoldableValue.java | 2 +- .../SubstituteSpatialSurrogates.java | 2 +- .../{ => logical}/SubstituteSurrogates.java | 5 +- .../rules/logical/TemporaryNameUtils.java | 40 + .../TranslateMetricsAggregate.java | 2 +- .../rules/logical/local/InferIsNotNull.java | 111 +++ .../local/InferNonNullAggConstraint.java | 75 ++ .../local/LocalPropagateEmptyRelation.java | 48 ++ .../local/ReplaceMissingFieldWithNull.java | 99 +++ .../local/ReplaceTopNWithLimitAndSort.java | 30 + .../rules/physical/ProjectAwayColumns.java | 91 +++ .../local/EnableSpatialDistancePushdown.java | 160 ++++ .../physical/local/InsertFieldExtraction.java | 86 ++ .../physical/local/LucenePushDownUtils.java | 37 + .../physical/local/PushFiltersToSource.java | 218 +++++ .../physical/local/PushLimitToSource.java | 28 + .../physical/local/PushStatsToSource.java | 124 +++ .../physical/local/PushTopNToSource.java | 64 ++ .../local/ReplaceSourceAttributes.java | 50 ++ .../local/SpatialDocValuesExtraction.java | 131 +++ .../xpack/esql/planner/PlannerUtils.java | 5 +- .../function/AbstractAggregationTestCase.java | 2 +- 
.../function/AbstractFunctionTestCase.java | 2 +- .../AbstractScalarFunctionTestCase.java | 2 +- .../xpack/esql/optimizer/FoldNull.java | 17 - .../LocalLogicalPlanOptimizerTests.java | 11 +- .../optimizer/LogicalPlanOptimizerTests.java | 18 +- .../optimizer/PhysicalPlanOptimizerTests.java | 3 +- .../esql/optimizer/PropagateNullable.java | 18 - ...BooleanFunctionEqualsEliminationTests.java | 2 +- .../BooleanSimplificationTests.java | 2 +- .../CombineBinaryComparisonsTests.java | 2 +- .../CombineDisjunctionsTests.java | 2 +- .../{ => logical}/ConstantFoldingTests.java | 2 +- .../rules/{ => logical}/FoldNullTests.java | 2 +- .../LiteralsOnTheRightTests.java | 2 +- .../{ => logical}/PropagateEqualsTests.java | 2 +- .../{ => logical}/PropagateNullableTests.java | 2 +- .../{ => logical}/ReplaceRegexMatchTests.java | 2 +- 93 files changed, 1862 insertions(+), 1613 deletions(-) delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/AddDefaultTopN.java (87%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/BooleanFunctionEqualsElimination.java (96%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/BooleanSimplification.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/CombineBinaryComparisons.java (99%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/CombineDisjunctions.java (99%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/CombineEvals.java (94%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => 
logical}/CombineProjections.java (99%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ConstantFolding.java (91%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ConvertStringToByteRef.java (95%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/DuplicateLimitAfterMvExpand.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/FoldNull.java (95%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/LiteralsOnTheRight.java (92%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/OptimizerRules.java (75%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PartiallyFoldCase.java (82%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PropagateEmptyRelation.java (88%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PropagateEquals.java (99%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PropagateEvalFoldables.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PropagateNullable.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PruneColumns.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PruneEmptyPlans.java (56%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PruneFilters.java (93%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PruneLiteralsInOrderBy.java (95%) rename 
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PruneOrderByBeforeStats.java (96%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PruneRedundantSortClauses.java (94%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PushDownAndCombineFilters.java (96%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PushDownAndCombineLimits.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PushDownAndCombineOrderBy.java (82%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PushDownEnrich.java (72%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PushDownEval.java (71%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PushDownRegexExtract.java (72%) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownUtils.java rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/RemoveStatsOverride.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceAliasingEvalWithProject.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceLimitAndSortAsTopN.java (93%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceLookupWithJoin.java (93%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceOrderByExpressionWithEval.java (96%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceRegexMatch.java (96%) rename 
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceStatsAggExpressionWithEval.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceStatsNestedExpressionWithEval.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceTrivialTypeConversions.java (95%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SetAsOptimized.java (92%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SimplifyComparisonsArithmetics.java (99%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SkipQueryOnEmptyMappings.java (92%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SkipQueryOnLimitZero.java (78%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SplitInWithFoldableValue.java (96%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SubstituteSpatialSurrogates.java (94%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/SubstituteSurrogates.java (96%) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TemporaryNameUtils.java rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/TranslateMetricsAggregate.java (99%) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferIsNotNull.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferNonNullAggConstraint.java create mode 100644 
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/LocalPropagateEmptyRelation.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceTopNWithLimitAndSort.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/EnableSpatialDistancePushdown.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/LucenePushDownUtils.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushFiltersToSource.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushLimitToSource.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java delete mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java delete mode 100644 
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/BooleanFunctionEqualsEliminationTests.java (97%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/BooleanSimplificationTests.java (98%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/CombineBinaryComparisonsTests.java (99%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/CombineDisjunctionsTests.java (99%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ConstantFoldingTests.java (99%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/FoldNullTests.java (97%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/LiteralsOnTheRightTests.java (95%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PropagateEqualsTests.java (99%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/PropagateNullableTests.java (99%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/{ => logical}/ReplaceRegexMatchTests.java (98%) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 374cc17b7b902..f295c4b64bd8d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -61,7 +61,7 @@ import static org.elasticsearch.xpack.esql.common.Failure.fail; import static 
org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; -import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.PushFiltersToSource.canPushToSource; +import static org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushFiltersToSource.canPushToSource; /** * This class is part of the planner. Responsible for failing impossible queries with a human-readable error message. In particular, this diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index 69134923e6f47..8c54b61dc803d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -7,63 +7,27 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BlockUtils; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeMap; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import 
org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; -import org.elasticsearch.xpack.esql.core.rule.Rule; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.CollectionUtils; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; -import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; -import org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules; -import org.elasticsearch.xpack.esql.optimizer.rules.PropagateEmptyRelation; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; -import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEmptyRelation; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferIsNotNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferNonNullAggConstraint; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.LocalPropagateEmptyRelation; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.ReplaceMissingFieldWithNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.ReplaceTopNWithLimitAndSort; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; -import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; -import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import 
org.elasticsearch.xpack.esql.stats.SearchStats; import java.util.ArrayList; -import java.util.LinkedHashSet; import java.util.List; -import java.util.Map; -import java.util.Set; import static java.util.Arrays.asList; -import static java.util.Collections.emptySet; import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.cleanup; import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.operators; -import static org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection.UP; /** - *

This class is part of the planner. Data node level logical optimizations. At this point we have access to - * {@link org.elasticsearch.xpack.esql.stats.SearchStats} which provides access to metadata about the index.

+ * This class is part of the planner. Data node level logical optimizations. At this point we have access to + * {@link org.elasticsearch.xpack.esql.stats.SearchStats} which provides access to metadata about the index. * *

NB: This class also reapplies all the rules from {@link LogicalPlanOptimizer#operators()} and {@link LogicalPlanOptimizer#cleanup()} - *

*/ public class LocalLogicalPlanOptimizer extends ParameterizedRuleExecutor { @@ -105,255 +69,4 @@ private List> replaceRules(List> listOfRul public LogicalPlan localOptimize(LogicalPlan plan) { return execute(plan); } - - /** - * Break TopN back into Limit + OrderBy to allow the order rules to kick in. - */ - public static class ReplaceTopNWithLimitAndSort extends OptimizerRules.OptimizerRule { - public ReplaceTopNWithLimitAndSort() { - super(UP); - } - - @Override - protected LogicalPlan rule(TopN plan) { - return new Limit(plan.source(), plan.limit(), new OrderBy(plan.source(), plan.child(), plan.order())); - } - } - - /** - * Look for any fields used in the plan that are missing locally and replace them with null. - * This should minimize the plan execution, in the best scenario skipping its execution all together. - */ - private static class ReplaceMissingFieldWithNull extends ParameterizedRule { - - @Override - public LogicalPlan apply(LogicalPlan plan, LocalLogicalOptimizerContext localLogicalOptimizerContext) { - return plan.transformUp(p -> missingToNull(p, localLogicalOptimizerContext.searchStats())); - } - - private LogicalPlan missingToNull(LogicalPlan plan, SearchStats stats) { - if (plan instanceof EsRelation || plan instanceof LocalRelation) { - return plan; - } - - if (plan instanceof Aggregate a) { - // don't do anything (for now) - return a; - } - // keep the aliased name - else if (plan instanceof Project project) { - var projections = project.projections(); - List newProjections = new ArrayList<>(projections.size()); - Map nullLiteral = Maps.newLinkedHashMapWithExpectedSize(DataType.types().size()); - - for (NamedExpression projection : projections) { - // Do not use the attribute name, this can deviate from the field name for union types. 
- if (projection instanceof FieldAttribute f && stats.exists(f.fieldName()) == false) { - DataType dt = f.dataType(); - Alias nullAlias = nullLiteral.get(f.dataType()); - // save the first field as null (per datatype) - if (nullAlias == null) { - Alias alias = new Alias(f.source(), f.name(), Literal.of(f, null), f.id()); - nullLiteral.put(dt, alias); - projection = alias.toAttribute(); - } - // otherwise point to it - else { - // since avoids creating field copies - projection = new Alias(f.source(), f.name(), nullAlias.toAttribute(), f.id()); - } - } - - newProjections.add(projection); - } - // add the first found field as null - if (nullLiteral.size() > 0) { - plan = new Eval(project.source(), project.child(), new ArrayList<>(nullLiteral.values())); - plan = new Project(project.source(), plan, newProjections); - } - } else if (plan instanceof Eval - || plan instanceof Filter - || plan instanceof OrderBy - || plan instanceof RegexExtract - || plan instanceof TopN) { - plan = plan.transformExpressionsOnlyUp( - FieldAttribute.class, - // Do not use the attribute name, this can deviate from the field name for union types. - f -> stats.exists(f.fieldName()) ? f : Literal.of(f, null) - ); - } - - return plan; - } - } - - /** - * Simplify IsNotNull targets by resolving the underlying expression to its root fields with unknown - * nullability. - * e.g. 
- * (x + 1) / 2 IS NOT NULL --> x IS NOT NULL AND (x+1) / 2 IS NOT NULL - * SUBSTRING(x, 3) > 4 IS NOT NULL --> x IS NOT NULL AND SUBSTRING(x, 3) > 4 IS NOT NULL - * When dealing with multiple fields, a conjunction/disjunction based on the predicate: - * (x + y) / 4 IS NOT NULL --> x IS NOT NULL AND y IS NOT NULL AND (x + y) / 4 IS NOT NULL - * This handles the case of fields nested inside functions or expressions in order to avoid: - * - having to evaluate the whole expression - * - not pushing down the filter due to expression evaluation - * IS NULL cannot be simplified since it leads to a disjunction which prevents the filter to be - * pushed down: - * (x + 1) IS NULL --> x IS NULL OR x + 1 IS NULL - * and x IS NULL cannot be pushed down - *
- * Implementation-wise this rule goes bottom-up, keeping an alias up to date to the current plan - * and then looks for replacing the target. - */ - static class InferIsNotNull extends Rule { - - @Override - public LogicalPlan apply(LogicalPlan plan) { - // the alias map is shared across the whole plan - AttributeMap aliases = new AttributeMap<>(); - // traverse bottom-up to pick up the aliases as we go - plan = plan.transformUp(p -> inspectPlan(p, aliases)); - return plan; - } - - private LogicalPlan inspectPlan(LogicalPlan plan, AttributeMap aliases) { - // inspect just this plan properties - plan.forEachExpression(Alias.class, a -> aliases.put(a.toAttribute(), a.child())); - // now go about finding isNull/isNotNull - LogicalPlan newPlan = plan.transformExpressionsOnlyUp(IsNotNull.class, inn -> inferNotNullable(inn, aliases)); - return newPlan; - } - - private Expression inferNotNullable(IsNotNull inn, AttributeMap aliases) { - Expression result = inn; - Set refs = resolveExpressionAsRootAttributes(inn.field(), aliases); - // no refs found or could not detect - return the original function - if (refs.size() > 0) { - // add IsNull for the filters along with the initial inn - var innList = CollectionUtils.combine(refs.stream().map(r -> (Expression) new IsNotNull(inn.source(), r)).toList(), inn); - result = Predicates.combineAnd(innList); - } - return result; - } - - /** - * Unroll the expression to its references to get to the root fields - * that really matter for filtering. - */ - protected Set resolveExpressionAsRootAttributes(Expression exp, AttributeMap aliases) { - Set resolvedExpressions = new LinkedHashSet<>(); - boolean changed = doResolve(exp, aliases, resolvedExpressions); - return changed ? 
resolvedExpressions : emptySet(); - } - - private boolean doResolve(Expression exp, AttributeMap aliases, Set resolvedExpressions) { - boolean changed = false; - // check if the expression can be skipped or is not nullabe - if (skipExpression(exp)) { - resolvedExpressions.add(exp); - } else { - for (Expression e : exp.references()) { - Expression resolved = aliases.resolve(e, e); - // found a root attribute, bail out - if (resolved instanceof Attribute a && resolved == e) { - resolvedExpressions.add(a); - // don't mark things as change if the original expression hasn't been broken down - changed |= resolved != exp; - } else { - // go further - changed |= doResolve(resolved, aliases, resolvedExpressions); - } - } - } - return changed; - } - - private static boolean skipExpression(Expression e) { - return e instanceof Coalesce; - } - } - - /** - * Local aggregation can only produce intermediate state that get wired into the global agg. - */ - private static class LocalPropagateEmptyRelation extends PropagateEmptyRelation { - - /** - * Local variant of the aggregation that returns the intermediate value. - */ - @Override - protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFactory blockFactory, List blocks) { - List output = AbstractPhysicalOperationProviders.intermediateAttributes(List.of(agg), List.of()); - for (Attribute o : output) { - DataType dataType = o.dataType(); - // boolean right now is used for the internal #seen so always return true - var value = dataType == DataType.BOOLEAN ? true - // look for count(literal) with literal != null - : aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null) ? 
0L - // otherwise nullify - : null; - var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(dataType), 1); - wrapper.accept(value); - blocks.add(wrapper.builder().build()); - } - } - } - - /** - * The vast majority of aggs ignore null entries - this rule adds a pushable filter, as it is cheap - * to execute, to filter this entries out to begin with. - * STATS x = min(a), y = sum(b) - * becomes - * | WHERE a IS NOT NULL OR b IS NOT NULL - * | STATS x = min(a), y = sum(b) - *
- * Unfortunately this optimization cannot be applied when grouping is necessary since it can filter out - * groups containing only null values - */ - static class InferNonNullAggConstraint extends ParameterizedOptimizerRule { - - @Override - protected LogicalPlan rule(Aggregate aggregate, LocalLogicalOptimizerContext context) { - // only look at aggregates with default grouping - if (aggregate.groupings().size() > 0) { - return aggregate; - } - - SearchStats stats = context.searchStats(); - LogicalPlan plan = aggregate; - var aggs = aggregate.aggregates(); - Set nonNullAggFields = Sets.newLinkedHashSetWithExpectedSize(aggs.size()); - for (var agg : aggs) { - if (Alias.unwrap(agg) instanceof AggregateFunction af) { - Expression field = af.field(); - // ignore literals (e.g. COUNT(1)) - // make sure the field exists at the source and is indexed (not runtime) - if (field.foldable() == false && field instanceof FieldAttribute fa && stats.isIndexed(fa.name())) { - nonNullAggFields.add(field); - } else { - // otherwise bail out since unless disjunction needs to cover _all_ fields, things get filtered out - return plan; - } - } - } - - if (nonNullAggFields.size() > 0) { - Expression condition = Predicates.combineOr( - nonNullAggFields.stream().map(f -> (Expression) new IsNotNull(aggregate.source(), f)).toList() - ); - plan = aggregate.replaceChild(new Filter(aggregate.source(), aggregate.child(), condition)); - } - return plan; - } - } - - abstract static class ParameterizedOptimizerRule extends ParameterizedRule { - - public final LogicalPlan apply(LogicalPlan plan, P context) { - return plan.transformUp(typeToken(), t -> rule(t, context)); - } - - protected abstract LogicalPlan rule(SubPlan plan, P context); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index dad8973919e10..20f3e5c9150e5 
100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -7,108 +7,31 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.geometry.Circle; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.utils.WellKnownBinary; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.common.Failure; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeMap; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.expression.TypedAttribute; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.expression.predicate.Range; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.MatchQueryPredicate; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; -import 
org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RegexMatch; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardLike; -import org.elasticsearch.xpack.esql.core.querydsl.query.Query; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.core.rule.Rule; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.CollectionUtils; -import org.elasticsearch.xpack.esql.core.util.Queries; -import org.elasticsearch.xpack.esql.core.util.Queries.Clause; -import org.elasticsearch.xpack.esql.core.util.StringUtils; -import org.elasticsearch.xpack.esql.expression.Order; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; -import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; -import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveBinaryComparison; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules.OptimizerRule; -import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; -import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; -import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; -import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat; -import org.elasticsearch.xpack.esql.plan.physical.EvalExec; -import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; -import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; -import org.elasticsearch.xpack.esql.plan.physical.FilterExec; -import org.elasticsearch.xpack.esql.plan.physical.LimitExec; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.EnableSpatialDistancePushdown; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.InsertFieldExtraction; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushFiltersToSource; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushLimitToSource; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushStatsToSource; +import 
org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushTopNToSource; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.ReplaceSourceAttributes; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.SpatialDocValuesExtraction; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.TopNExec; -import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; -import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; -import org.elasticsearch.xpack.esql.planner.EsqlTranslatorHandler; -import org.elasticsearch.xpack.esql.stats.SearchStats; -import java.nio.ByteOrder; import java.util.ArrayList; import java.util.Collection; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.LinkedList; import java.util.List; -import java.util.Set; -import java.util.function.Predicate; import static java.util.Arrays.asList; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; -import static org.elasticsearch.xpack.esql.core.expression.predicate.Predicates.splitAnd; -import static org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection.UP; -import static org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType.COUNT; /** * Manages field extraction and pushing parts of the query into Lucene. 
(Query elements that are not pushed into Lucene are executed via * the compute engine) */ public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor { - public static final EsqlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); private final PhysicalVerifier verifier = PhysicalVerifier.INSTANCE; @@ -128,6 +51,11 @@ PhysicalPlan verify(PhysicalPlan plan) { return plan; } + @Override + protected List> batches() { + return rules(true); + } + protected List> rules(boolean optimizeForEsSource) { List> esSourceRules = new ArrayList<>(4); esSourceRules.add(new ReplaceSourceAttributes()); @@ -149,664 +77,4 @@ protected List> rules(boolean optimizeForEsSource) { return asList(pushdown, fieldExtraction); } - @Override - protected List> batches() { - return rules(true); - } - - private static class ReplaceSourceAttributes extends OptimizerRule { - - ReplaceSourceAttributes() { - super(UP); - } - - @Override - protected PhysicalPlan rule(EsSourceExec plan) { - var docId = new FieldAttribute(plan.source(), EsQueryExec.DOC_ID_FIELD.getName(), EsQueryExec.DOC_ID_FIELD); - if (plan.indexMode() == IndexMode.TIME_SERIES) { - Attribute tsid = null, timestamp = null; - for (Attribute attr : plan.output()) { - String name = attr.name(); - if (name.equals(MetadataAttribute.TSID_FIELD)) { - tsid = attr; - } else if (name.equals(MetadataAttribute.TIMESTAMP_FIELD)) { - timestamp = attr; - } - } - if (tsid == null || timestamp == null) { - throw new IllegalStateException("_tsid or @timestamp are missing from the time-series source"); - } - return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId, tsid, timestamp), plan.query()); - } else { - return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId), plan.query()); - } - } - } - - // Materialize the concrete fields that need to be extracted from the storage until the last possible moment. 
- // Expects the local plan to already have a projection containing the fields needed upstream. - // - // 1. add the materialization right before usage inside the local plan - // 2. materialize any missing fields needed further up the chain - /** - * @see org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer.ProjectAwayColumns - */ - static class InsertFieldExtraction extends Rule { - - @Override - public PhysicalPlan apply(PhysicalPlan plan) { - // apply the plan locally, adding a field extractor right before data is loaded - // by going bottom-up - plan = plan.transformUp(UnaryExec.class, p -> { - var missing = missingAttributes(p); - - /* - * If there is a single grouping then we'll try to use ords. Either way - * it loads the field lazily. If we have more than one field we need to - * make sure the fields are loaded for the standard hash aggregator. - */ - if (p instanceof AggregateExec agg && agg.groupings().size() == 1) { - var leaves = new LinkedList<>(); - // TODO: this seems out of place - agg.aggregates() - .stream() - .filter(a -> agg.groupings().contains(a) == false) - .forEach(a -> leaves.addAll(a.collectLeaves())); - var remove = agg.groupings().stream().filter(g -> leaves.contains(g) == false).toList(); - missing.removeAll(Expressions.references(remove)); - } - - // add extractor - if (missing.isEmpty() == false) { - // collect source attributes and add the extractor - var extractor = new FieldExtractExec(p.source(), p.child(), List.copyOf(missing)); - p = p.replaceChild(extractor); - } - - return p; - }); - - return plan; - } - - private static Set missingAttributes(PhysicalPlan p) { - var missing = new LinkedHashSet(); - var input = p.inputSet(); - - // collect field attributes used inside expressions - p.forEachExpression(TypedAttribute.class, f -> { - if (f instanceof FieldAttribute || f instanceof MetadataAttribute) { - if (input.contains(f) == false) { - missing.add(f); - } - } - }); - return missing; - } - } - - public static class 
PushFiltersToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule< - FilterExec, - LocalPhysicalOptimizerContext> { - - @Override - protected PhysicalPlan rule(FilterExec filterExec, LocalPhysicalOptimizerContext ctx) { - PhysicalPlan plan = filterExec; - if (filterExec.child() instanceof EsQueryExec queryExec) { - List pushable = new ArrayList<>(); - List nonPushable = new ArrayList<>(); - for (Expression exp : splitAnd(filterExec.condition())) { - (canPushToSource(exp, x -> hasIdenticalDelegate(x, ctx.searchStats())) ? pushable : nonPushable).add(exp); - } - // Combine GT, GTE, LT and LTE in pushable to Range if possible - List newPushable = combineEligiblePushableToRange(pushable); - if (newPushable.size() > 0) { // update the executable with pushable conditions - Query queryDSL = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(newPushable)); - QueryBuilder planQuery = queryDSL.asBuilder(); - var query = Queries.combine(Clause.FILTER, asList(queryExec.query(), planQuery)); - queryExec = new EsQueryExec( - queryExec.source(), - queryExec.index(), - queryExec.indexMode(), - queryExec.output(), - query, - queryExec.limit(), - queryExec.sorts(), - queryExec.estimatedRowSize() - ); - if (nonPushable.size() > 0) { // update filter with remaining non-pushable conditions - plan = new FilterExec(filterExec.source(), queryExec, Predicates.combineAnd(nonPushable)); - } else { // prune Filter entirely - plan = queryExec; - } - } // else: nothing changes - } - - return plan; - } - - private static List combineEligiblePushableToRange(List pushable) { - List bcs = new ArrayList<>(); - List ranges = new ArrayList<>(); - List others = new ArrayList<>(); - boolean changed = false; - - pushable.forEach(e -> { - if (e instanceof GreaterThan || e instanceof GreaterThanOrEqual || e instanceof LessThan || e instanceof LessThanOrEqual) { - if (((EsqlBinaryComparison) e).right().foldable()) { - bcs.add((EsqlBinaryComparison) e); - } else { - others.add(e); - } - } else { - 
others.add(e); - } - }); - - for (int i = 0, step = 1; i < bcs.size() - 1; i += step, step = 1) { - BinaryComparison main = bcs.get(i); - for (int j = i + 1; j < bcs.size(); j++) { - BinaryComparison other = bcs.get(j); - if (main.left().semanticEquals(other.left())) { - // >/>= AND />= - else if ((other instanceof GreaterThan || other instanceof GreaterThanOrEqual) - && (main instanceof LessThan || main instanceof LessThanOrEqual)) { - bcs.remove(j); - bcs.remove(i); - - ranges.add( - new Range( - main.source(), - main.left(), - other.right(), - other instanceof GreaterThanOrEqual, - main.right(), - main instanceof LessThanOrEqual, - main.zoneId() - ) - ); - - changed = true; - step = 0; - break; - } - } - } - } - return changed ? CollectionUtils.combine(others, bcs, ranges) : pushable; - } - - public static boolean canPushToSource(Expression exp, Predicate hasIdenticalDelegate) { - if (exp instanceof BinaryComparison bc) { - return isAttributePushable(bc.left(), bc, hasIdenticalDelegate) && bc.right().foldable(); - } else if (exp instanceof InsensitiveBinaryComparison bc) { - return isAttributePushable(bc.left(), bc, hasIdenticalDelegate) && bc.right().foldable(); - } else if (exp instanceof BinaryLogic bl) { - return canPushToSource(bl.left(), hasIdenticalDelegate) && canPushToSource(bl.right(), hasIdenticalDelegate); - } else if (exp instanceof In in) { - return isAttributePushable(in.value(), null, hasIdenticalDelegate) && Expressions.foldable(in.list()); - } else if (exp instanceof Not not) { - return canPushToSource(not.field(), hasIdenticalDelegate); - } else if (exp instanceof UnaryScalarFunction usf) { - if (usf instanceof RegexMatch || usf instanceof IsNull || usf instanceof IsNotNull) { - if (usf instanceof IsNull || usf instanceof IsNotNull) { - if (usf.field() instanceof FieldAttribute fa && fa.dataType().equals(DataType.TEXT)) { - return true; - } - } - return isAttributePushable(usf.field(), usf, hasIdenticalDelegate); - } - } else if (exp 
instanceof CIDRMatch cidrMatch) { - return isAttributePushable(cidrMatch.ipField(), cidrMatch, hasIdenticalDelegate) - && Expressions.foldable(cidrMatch.matches()); - } else if (exp instanceof SpatialRelatesFunction bc) { - return bc.canPushToSource(LocalPhysicalPlanOptimizer::isAggregatable); - } else if (exp instanceof MatchQueryPredicate mqp) { - return mqp.field() instanceof FieldAttribute && DataType.isString(mqp.field().dataType()); - } else if (exp instanceof StringQueryPredicate) { - return true; - } - return false; - } - - private static boolean isAttributePushable( - Expression expression, - Expression operation, - Predicate hasIdenticalDelegate - ) { - if (isPushableFieldAttribute(expression, hasIdenticalDelegate)) { - return true; - } - if (expression instanceof MetadataAttribute ma && ma.searchable()) { - return operation == null - // no range or regex queries supported with metadata fields - || operation instanceof Equals - || operation instanceof NotEquals - || operation instanceof WildcardLike; - } - return false; - } - } - - /** - * this method is supposed to be used to define if a field can be used for exact push down (eg. sort or filter). - * "aggregatable" is the most accurate information we can have from field_caps as of now. - * Pushing down operations on fields that are not aggregatable would result in an error. 
- */ - private static boolean isAggregatable(FieldAttribute f) { - return f.exactAttribute().field().isAggregatable(); - } - - private static class PushLimitToSource extends OptimizerRule { - @Override - protected PhysicalPlan rule(LimitExec limitExec) { - PhysicalPlan plan = limitExec; - PhysicalPlan child = limitExec.child(); - if (child instanceof EsQueryExec queryExec) { // add_task_parallelism_above_query: false - plan = queryExec.withLimit(limitExec.limit()); - } else if (child instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec queryExec) { - plan = exchangeExec.replaceChild(queryExec.withLimit(limitExec.limit())); - } - return plan; - } - } - - private static class PushTopNToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule< - TopNExec, - LocalPhysicalOptimizerContext> { - @Override - protected PhysicalPlan rule(TopNExec topNExec, LocalPhysicalOptimizerContext ctx) { - PhysicalPlan plan = topNExec; - PhysicalPlan child = topNExec.child(); - if (canPushSorts(child) && canPushDownOrders(topNExec.order(), x -> hasIdenticalDelegate(x, ctx.searchStats()))) { - var sorts = buildFieldSorts(topNExec.order()); - var limit = topNExec.limit(); - - if (child instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec queryExec) { - plan = exchangeExec.replaceChild(queryExec.withSorts(sorts).withLimit(limit)); - } else { - plan = ((EsQueryExec) child).withSorts(sorts).withLimit(limit); - } - } - return plan; - } - - private boolean canPushDownOrders(List orders, Predicate hasIdenticalDelegate) { - // allow only exact FieldAttributes (no expressions) for sorting - return orders.stream().allMatch(o -> isPushableFieldAttribute(o.child(), hasIdenticalDelegate)); - } - - private List buildFieldSorts(List orders) { - List sorts = new ArrayList<>(orders.size()); - for (Order o : orders) { - sorts.add(new EsQueryExec.FieldSort(((FieldAttribute) o.child()).exactAttribute(), o.direction(), o.nullsPosition())); 
- } - return sorts; - } - } - - private static boolean canPushSorts(PhysicalPlan plan) { - if (plan instanceof EsQueryExec queryExec) { - return queryExec.canPushSorts(); - } - if (plan instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec queryExec) { - return queryExec.canPushSorts(); - } - return false; - } - - /** - * Looks for the case where certain stats exist right before the query and thus can be pushed down. - */ - private static class PushStatsToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule< - AggregateExec, - LocalPhysicalOptimizerContext> { - - @Override - protected PhysicalPlan rule(AggregateExec aggregateExec, LocalPhysicalOptimizerContext context) { - PhysicalPlan plan = aggregateExec; - if (aggregateExec.child() instanceof EsQueryExec queryExec) { - var tuple = pushableStats(aggregateExec, context); - - // for the moment support pushing count just for one field - List stats = tuple.v2(); - if (stats.size() > 1) { - return aggregateExec; - } - - // TODO: handle case where some aggs cannot be pushed down by breaking the aggs into two sources (regular + stats) + union - // use the stats since the attributes are larger in size (due to seen) - if (tuple.v2().size() == aggregateExec.aggregates().size()) { - plan = new EsStatsQueryExec( - aggregateExec.source(), - queryExec.index(), - queryExec.query(), - queryExec.limit(), - tuple.v1(), - tuple.v2() - ); - } - } - return plan; - } - - private Tuple, List> pushableStats(AggregateExec aggregate, LocalPhysicalOptimizerContext context) { - AttributeMap stats = new AttributeMap<>(); - Tuple, List> tuple = new Tuple<>(new ArrayList<>(), new ArrayList<>()); - - if (aggregate.groupings().isEmpty()) { - for (NamedExpression agg : aggregate.aggregates()) { - var attribute = agg.toAttribute(); - Stat stat = stats.computeIfAbsent(attribute, a -> { - if (agg instanceof Alias as) { - Expression child = as.child(); - if (child instanceof Count count) { - var target = 
count.field(); - String fieldName = null; - QueryBuilder query = null; - // TODO: add count over field (has to be field attribute) - if (target.foldable()) { - fieldName = StringUtils.WILDCARD; - } - // check if regular field - else { - if (target instanceof FieldAttribute fa) { - var fName = fa.name(); - if (context.searchStats().isSingleValue(fName)) { - fieldName = fa.name(); - query = QueryBuilders.existsQuery(fieldName); - } - } - } - if (fieldName != null) { - return new Stat(fieldName, COUNT, query); - } - } - } - return null; - }); - if (stat != null) { - List intermediateAttributes = AbstractPhysicalOperationProviders.intermediateAttributes( - singletonList(agg), - emptyList() - ); - // TODO: the attributes have been recreated here; they will have wrong name ids, and the dependency check will - // probably fail when we fix https://github.com/elastic/elasticsearch/issues/105436. - // We may need to refactor AbstractPhysicalOperationProviders.intermediateAttributes so it doesn't return just - // a list of attributes, but a mapping from the logical to the physical attributes. 
- tuple.v1().addAll(intermediateAttributes); - tuple.v2().add(stat); - } - } - } - - return tuple; - } - } - - public static boolean hasIdenticalDelegate(FieldAttribute attr, SearchStats stats) { - return stats.hasIdenticalDelegate(attr.name()); - } - - public static boolean isPushableFieldAttribute(Expression exp, Predicate hasIdenticalDelegate) { - if (exp instanceof FieldAttribute fa && fa.getExactInfo().hasExact() && isAggregatable(fa)) { - return fa.dataType() != DataType.TEXT || hasIdenticalDelegate.test(fa); - } - return false; - } - - private static class SpatialDocValuesExtraction extends OptimizerRule { - @Override - protected PhysicalPlan rule(AggregateExec aggregate) { - var foundAttributes = new HashSet(); - - PhysicalPlan plan = aggregate.transformDown(UnaryExec.class, exec -> { - if (exec instanceof AggregateExec agg) { - var orderedAggregates = new ArrayList(); - var changedAggregates = false; - for (NamedExpression aggExpr : agg.aggregates()) { - if (aggExpr instanceof Alias as && as.child() instanceof SpatialAggregateFunction af) { - if (af.field() instanceof FieldAttribute fieldAttribute - && allowedForDocValues(fieldAttribute, agg, foundAttributes)) { - // We need to both mark the field to load differently, and change the spatial function to know to use it - foundAttributes.add(fieldAttribute); - changedAggregates = true; - orderedAggregates.add(as.replaceChild(af.withDocValues())); - } else { - orderedAggregates.add(aggExpr); - } - } else { - orderedAggregates.add(aggExpr); - } - } - if (changedAggregates) { - exec = new AggregateExec( - agg.source(), - agg.child(), - agg.groupings(), - orderedAggregates, - agg.getMode(), - agg.intermediateAttributes(), - agg.estimatedRowSize() - ); - } - } - if (exec instanceof EvalExec evalExec) { - List fields = evalExec.fields(); - List changed = fields.stream() - .map( - f -> (Alias) f.transformDown( - SpatialRelatesFunction.class, - spatialRelatesFunction -> 
(spatialRelatesFunction.hasFieldAttribute(foundAttributes)) - ? spatialRelatesFunction.withDocValues(foundAttributes) - : spatialRelatesFunction - ) - ) - .toList(); - if (changed.equals(fields) == false) { - exec = new EvalExec(exec.source(), exec.child(), changed); - } - } - if (exec instanceof FilterExec filterExec) { - // Note that ST_CENTROID does not support shapes, but SpatialRelatesFunction does, so when we extend the centroid - // to support shapes, we need to consider loading shape doc-values for both centroid and relates (ST_INTERSECTS) - var condition = filterExec.condition() - .transformDown( - SpatialRelatesFunction.class, - spatialRelatesFunction -> (spatialRelatesFunction.hasFieldAttribute(foundAttributes)) - ? spatialRelatesFunction.withDocValues(foundAttributes) - : spatialRelatesFunction - ); - if (filterExec.condition().equals(condition) == false) { - exec = new FilterExec(filterExec.source(), filterExec.child(), condition); - } - } - if (exec instanceof FieldExtractExec fieldExtractExec) { - // Tell the field extractor that it should extract the field from doc-values instead of source values - var attributesToExtract = fieldExtractExec.attributesToExtract(); - Set docValuesAttributes = new HashSet<>(); - for (Attribute found : foundAttributes) { - if (attributesToExtract.contains(found)) { - docValuesAttributes.add(found); - } - } - if (docValuesAttributes.size() > 0) { - exec = new FieldExtractExec(exec.source(), exec.child(), attributesToExtract, docValuesAttributes); - } - } - return exec; - }); - return plan; - } - - /** - * This function disallows the use of more than one field for doc-values extraction in the same spatial relation function. - * This is because comparing two doc-values fields is not supported in the current implementation. 
- */ - private boolean allowedForDocValues(FieldAttribute fieldAttribute, AggregateExec agg, Set foundAttributes) { - var candidateDocValuesAttributes = new HashSet<>(foundAttributes); - candidateDocValuesAttributes.add(fieldAttribute); - var spatialRelatesAttributes = new HashSet(); - agg.forEachExpressionDown(SpatialRelatesFunction.class, relatesFunction -> { - candidateDocValuesAttributes.forEach(candidate -> { - if (relatesFunction.hasFieldAttribute(Set.of(candidate))) { - spatialRelatesAttributes.add(candidate); - } - }); - }); - // Disallow more than one spatial field to be extracted using doc-values (for now) - return spatialRelatesAttributes.size() < 2; - } - } - - /** - * When a spatial distance predicate can be pushed down to lucene, this is done by capturing the distance within the same function. - * In principle this is like re-writing the predicate: - *
WHERE ST_DISTANCE(field, TO_GEOPOINT("POINT(0 0)")) <= 10000
- * as: - *
WHERE ST_INTERSECTS(field, TO_GEOSHAPE("CIRCLE(0,0,10000)"))
- */ - public static class EnableSpatialDistancePushdown extends PhysicalOptimizerRules.ParameterizedOptimizerRule< - FilterExec, - LocalPhysicalOptimizerContext> { - - @Override - protected PhysicalPlan rule(FilterExec filterExec, LocalPhysicalOptimizerContext ctx) { - PhysicalPlan plan = filterExec; - if (filterExec.child() instanceof EsQueryExec) { - // Find and rewrite any binary comparisons that involve a distance function and a literal - var rewritten = filterExec.condition().transformDown(EsqlBinaryComparison.class, comparison -> { - ComparisonType comparisonType = ComparisonType.from(comparison.getFunctionType()); - if (comparison.left() instanceof StDistance dist && comparison.right().foldable()) { - return rewriteComparison(comparison, dist, comparison.right(), comparisonType); - } else if (comparison.right() instanceof StDistance dist && comparison.left().foldable()) { - return rewriteComparison(comparison, dist, comparison.left(), ComparisonType.invert(comparisonType)); - } - return comparison; - }); - if (rewritten.equals(filterExec.condition()) == false) { - plan = new FilterExec(filterExec.source(), filterExec.child(), rewritten); - } - } - - return plan; - } - - private Expression rewriteComparison( - EsqlBinaryComparison comparison, - StDistance dist, - Expression literal, - ComparisonType comparisonType - ) { - Object value = literal.fold(); - if (value instanceof Number number) { - if (dist.right().foldable()) { - return rewriteDistanceFilter(comparison, dist.left(), dist.right(), number, comparisonType); - } else if (dist.left().foldable()) { - return rewriteDistanceFilter(comparison, dist.right(), dist.left(), number, comparisonType); - } - } - return comparison; - } - - private Expression rewriteDistanceFilter( - EsqlBinaryComparison comparison, - Expression spatialExp, - Expression literalExp, - Number number, - ComparisonType comparisonType - ) { - Geometry geometry = SpatialRelatesUtils.makeGeometryFromLiteral(literalExp); - if (geometry 
instanceof Point point) { - double distance = number.doubleValue(); - Source source = comparison.source(); - if (comparisonType.lt) { - distance = comparisonType.eq ? distance : Math.nextDown(distance); - return new SpatialIntersects(source, spatialExp, makeCircleLiteral(point, distance, literalExp)); - } else if (comparisonType.gt) { - distance = comparisonType.eq ? distance : Math.nextUp(distance); - return new SpatialDisjoint(source, spatialExp, makeCircleLiteral(point, distance, literalExp)); - } else if (comparisonType.eq) { - return new And( - source, - new SpatialIntersects(source, spatialExp, makeCircleLiteral(point, distance, literalExp)), - new SpatialDisjoint(source, spatialExp, makeCircleLiteral(point, Math.nextDown(distance), literalExp)) - ); - } - } - return comparison; - } - - private Literal makeCircleLiteral(Point point, double distance, Expression literalExpression) { - var circle = new Circle(point.getX(), point.getY(), distance); - var wkb = WellKnownBinary.toWKB(circle, ByteOrder.LITTLE_ENDIAN); - return new Literal(literalExpression.source(), new BytesRef(wkb), DataType.GEO_SHAPE); - } - - /** - * This enum captures the key differences between various inequalities as perceived from the spatial distance function. - * In particular, we need to know which direction the inequality points, with lt=true meaning the left is expected to be smaller - * than the right. And eq=true meaning we expect euality as well. We currently don't support Equals and NotEquals, so the third - * field disables those. 
- */ - enum ComparisonType { - LTE(true, false, true), - LT(true, false, false), - GTE(false, true, true), - GT(false, true, false), - EQ(false, false, true); - - private final boolean lt; - private final boolean gt; - private final boolean eq; - - ComparisonType(boolean lt, boolean gt, boolean eq) { - this.lt = lt; - this.gt = gt; - this.eq = eq; - } - - static ComparisonType from(EsqlBinaryComparison.BinaryComparisonOperation op) { - return switch (op) { - case LT -> LT; - case LTE -> LTE; - case GT -> GT; - case GTE -> GTE; - default -> EQ; - }; - } - - static ComparisonType invert(ComparisonType comparisonType) { - return switch (comparisonType) { - case LT -> GT; - case LTE -> GTE; - case GT -> LT; - case GTE -> LTE; - default -> EQ; - }; - } - } - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 5fcd0e00d866a..459e3f4d0284c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -7,87 +7,61 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.common.Failures; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeMap; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.NameId; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; 
-import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.Order; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.optimizer.rules.AddDefaultTopN; -import org.elasticsearch.xpack.esql.optimizer.rules.BooleanFunctionEqualsElimination; -import org.elasticsearch.xpack.esql.optimizer.rules.BooleanSimplification; -import org.elasticsearch.xpack.esql.optimizer.rules.CombineBinaryComparisons; -import org.elasticsearch.xpack.esql.optimizer.rules.CombineDisjunctions; -import org.elasticsearch.xpack.esql.optimizer.rules.CombineEvals; -import org.elasticsearch.xpack.esql.optimizer.rules.CombineProjections; -import org.elasticsearch.xpack.esql.optimizer.rules.ConstantFolding; -import org.elasticsearch.xpack.esql.optimizer.rules.ConvertStringToByteRef; -import org.elasticsearch.xpack.esql.optimizer.rules.DuplicateLimitAfterMvExpand; -import org.elasticsearch.xpack.esql.optimizer.rules.FoldNull; -import org.elasticsearch.xpack.esql.optimizer.rules.LiteralsOnTheRight; -import org.elasticsearch.xpack.esql.optimizer.rules.PartiallyFoldCase; -import org.elasticsearch.xpack.esql.optimizer.rules.PropagateEmptyRelation; -import org.elasticsearch.xpack.esql.optimizer.rules.PropagateEquals; -import org.elasticsearch.xpack.esql.optimizer.rules.PropagateEvalFoldables; -import org.elasticsearch.xpack.esql.optimizer.rules.PropagateNullable; -import org.elasticsearch.xpack.esql.optimizer.rules.PruneColumns; -import org.elasticsearch.xpack.esql.optimizer.rules.PruneEmptyPlans; -import org.elasticsearch.xpack.esql.optimizer.rules.PruneFilters; -import org.elasticsearch.xpack.esql.optimizer.rules.PruneLiteralsInOrderBy; -import org.elasticsearch.xpack.esql.optimizer.rules.PruneOrderByBeforeStats; -import 
org.elasticsearch.xpack.esql.optimizer.rules.PruneRedundantSortClauses; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineFilters; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineLimits; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineOrderBy; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownEnrich; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownEval; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownRegexExtract; -import org.elasticsearch.xpack.esql.optimizer.rules.RemoveStatsOverride; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceAliasingEvalWithProject; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceLimitAndSortAsTopN; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceLookupWithJoin; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceOrderByExpressionWithEval; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceRegexMatch; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceStatsAggExpressionWithEval; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceStatsNestedExpressionWithEval; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceTrivialTypeConversions; -import org.elasticsearch.xpack.esql.optimizer.rules.SetAsOptimized; -import org.elasticsearch.xpack.esql.optimizer.rules.SimplifyComparisonsArithmetics; -import org.elasticsearch.xpack.esql.optimizer.rules.SkipQueryOnEmptyMappings; -import org.elasticsearch.xpack.esql.optimizer.rules.SkipQueryOnLimitZero; -import org.elasticsearch.xpack.esql.optimizer.rules.SplitInWithFoldableValue; -import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSpatialSurrogates; -import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSurrogates; -import org.elasticsearch.xpack.esql.optimizer.rules.TranslateMetricsAggregate; -import org.elasticsearch.xpack.esql.plan.GeneratingPlan; -import org.elasticsearch.xpack.esql.plan.logical.Eval; 
+import org.elasticsearch.xpack.esql.optimizer.rules.logical.AddDefaultTopN; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.BooleanFunctionEqualsElimination; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.BooleanSimplification; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineBinaryComparisons; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineDisjunctions; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineEvals; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineProjections; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConstantFolding; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConvertStringToByteRef; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.DuplicateLimitAfterMvExpand; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.LiteralsOnTheRight; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PartiallyFoldCase; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEmptyRelation; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEquals; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEvalFoldables; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateNullable; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneColumns; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneEmptyPlans; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneFilters; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneLiteralsInOrderBy; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneOrderByBeforeStats; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PruneRedundantSortClauses; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownAndCombineFilters; +import 
org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownAndCombineLimits; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownAndCombineOrderBy; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownEnrich; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownEval; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownRegexExtract; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.RemoveStatsOverride; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceAliasingEvalWithProject; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLimitAndSortAsTopN; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLookupWithJoin; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceOrderByExpressionWithEval; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceRegexMatch; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsAggExpressionWithEval; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsNestedExpressionWithEval; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceTrivialTypeConversions; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SetAsOptimized; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SimplifyComparisonsArithmetics; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SkipQueryOnEmptyMappings; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SkipQueryOnLimitZero; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SplitInWithFoldableValue; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SubstituteSpatialSurrogates; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SubstituteSurrogates; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.TranslateMetricsAggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import 
org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashSet; import java.util.List; -import java.util.Map; -import java.util.Set; import static java.util.Arrays.asList; -import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; /** *

This class is part of the planner

@@ -119,30 +93,6 @@ public LogicalPlanOptimizer(LogicalOptimizerContext optimizerContext) { super(optimizerContext); } - public static String temporaryName(Expression inner, Expression outer, int suffix) { - String in = toString(inner); - String out = toString(outer); - return Attribute.rawTemporaryName(in, out, String.valueOf(suffix)); - } - - public static String locallyUniqueTemporaryName(String inner, String outer) { - return Attribute.rawTemporaryName(inner, outer, (new NameId()).toString()); - } - - static String toString(Expression ex) { - return ex instanceof AggregateFunction af ? af.functionName() : extractString(ex); - } - - static String extractString(Expression ex) { - return ex instanceof NamedExpression ne ? ne.name() : limitToString(ex.sourceText()).replace(' ', '_'); - } - - static int TO_STRING_LIMIT = 16; - - static String limitToString(String string) { - return string.length() > TO_STRING_LIMIT ? string.substring(0, TO_STRING_LIMIT - 1) + ">" : string; - } - public LogicalPlan optimize(LogicalPlan verified) { var optimized = execute(verified); @@ -159,6 +109,14 @@ protected List> batches() { return rules(); } + protected static List> rules() { + var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); + var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN()); + var label = new Batch<>("Set as Optimized", Limiter.ONCE, new SetAsOptimized()); + + return asList(substitutions(), operators(), skip, cleanup(), defaultTopN, label); + } + protected static Batch substitutions() { return new Batch<>( "Substitutions", @@ -226,206 +184,4 @@ protected static Batch operators() { protected static Batch cleanup() { return new Batch<>("Clean Up", new ReplaceLimitAndSortAsTopN()); } - - protected static List> rules() { - var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); - var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN()); - var label = new Batch<>("Set as Optimized", Limiter.ONCE, new 
SetAsOptimized()); - - return asList(substitutions(), operators(), skip, cleanup(), defaultTopN, label); - } - - public static LogicalPlan skipPlan(UnaryPlan plan) { - return new LocalRelation(plan.source(), plan.output(), LocalSupplier.EMPTY); - } - - public static LogicalPlan skipPlan(UnaryPlan plan, LocalSupplier supplier) { - return new LocalRelation(plan.source(), plan.output(), supplier); - } - - /** - * Pushes LogicalPlans which generate new attributes (Eval, Grok/Dissect, Enrich), past OrderBys and Projections. - * Although it seems arbitrary whether the OrderBy or the generating plan is executed first, this transformation ensures that OrderBys - * only separated by e.g. an Eval can be combined by {@link PushDownAndCombineOrderBy}. - *

- * E.g. {@code ... | sort a | eval x = b + 1 | sort x} becomes {@code ... | eval x = b + 1 | sort a | sort x} - *

- * Ordering the generating plans before the OrderBys has the advantage that it's always possible to order the plans like this. - * E.g., in the example above it would not be possible to put the eval after the two orderBys. - *

- * In case one of the generating plan's attributes would shadow the OrderBy's attributes, we alias the generated attribute first. - *

- * E.g. {@code ... | sort a | eval a = b + 1 | ...} becomes {@code ... | eval $$a = a | eval a = b + 1 | sort $$a | drop $$a ...} - *

- * In case the generating plan's attributes would shadow the Project's attributes, we rename the generated attributes in place. - *

- * E.g. {@code ... | rename a as z | eval a = b + 1 | ...} becomes {@code ... eval $$a = b + 1 | rename a as z, $$a as a ...} - */ - public static > LogicalPlan pushGeneratingPlanPastProjectAndOrderBy(Plan generatingPlan) { - LogicalPlan child = generatingPlan.child(); - if (child instanceof OrderBy orderBy) { - Set evalFieldNames = new LinkedHashSet<>(Expressions.names(generatingPlan.generatedAttributes())); - - // Look for attributes in the OrderBy's expressions and create aliases with temporary names for them. - AttributeReplacement nonShadowedOrders = renameAttributesInExpressions(evalFieldNames, orderBy.order()); - - AttributeMap aliasesForShadowedOrderByAttrs = nonShadowedOrders.replacedAttributes; - @SuppressWarnings("unchecked") - List newOrder = (List) (List) nonShadowedOrders.rewrittenExpressions; - - if (aliasesForShadowedOrderByAttrs.isEmpty() == false) { - List newAliases = new ArrayList<>(aliasesForShadowedOrderByAttrs.values()); - - LogicalPlan plan = new Eval(orderBy.source(), orderBy.child(), newAliases); - plan = generatingPlan.replaceChild(plan); - plan = new OrderBy(orderBy.source(), plan, newOrder); - plan = new Project(generatingPlan.source(), plan, generatingPlan.output()); - - return plan; - } - - return orderBy.replaceChild(generatingPlan.replaceChild(orderBy.child())); - } else if (child instanceof Project project) { - // We need to account for attribute shadowing: a rename might rely on a name generated in an Eval/Grok/Dissect/Enrich. - // E.g. in: - // - // Eval[[2 * x{f}#1 AS y]] - // \_Project[[x{f}#1, y{f}#2, y{f}#2 AS z]] - // - // Just moving the Eval down breaks z because we shadow y{f}#2. 
- // Instead, we use a different alias in the Eval, eventually renaming back to y: - // - // Project[[x{f}#1, y{f}#2 as z, $$y{r}#3 as y]] - // \_Eval[[2 * x{f}#1 as $$y]] - - List generatedAttributes = generatingPlan.generatedAttributes(); - - @SuppressWarnings("unchecked") - Plan generatingPlanWithResolvedExpressions = (Plan) resolveRenamesFromProject(generatingPlan, project); - - Set namesReferencedInRenames = new HashSet<>(); - for (NamedExpression ne : project.projections()) { - if (ne instanceof Alias as) { - namesReferencedInRenames.addAll(as.child().references().names()); - } - } - Map renameGeneratedAttributeTo = newNamesForConflictingAttributes( - generatingPlan.generatedAttributes(), - namesReferencedInRenames - ); - List newNames = generatedAttributes.stream() - .map(attr -> renameGeneratedAttributeTo.getOrDefault(attr.name(), attr.name())) - .toList(); - Plan generatingPlanWithRenamedAttributes = generatingPlanWithResolvedExpressions.withGeneratedNames(newNames); - - // Put the project at the top, but include the generated attributes. - // Any generated attributes that had to be renamed need to be re-renamed to their original names. 
- List generatedAttributesRenamedToOriginal = new ArrayList<>(generatedAttributes.size()); - List renamedGeneratedAttributes = generatingPlanWithRenamedAttributes.generatedAttributes(); - for (int i = 0; i < generatedAttributes.size(); i++) { - Attribute originalAttribute = generatedAttributes.get(i); - Attribute renamedAttribute = renamedGeneratedAttributes.get(i); - if (originalAttribute.name().equals(renamedAttribute.name())) { - generatedAttributesRenamedToOriginal.add(renamedAttribute); - } else { - generatedAttributesRenamedToOriginal.add( - new Alias( - originalAttribute.source(), - originalAttribute.name(), - renamedAttribute, - originalAttribute.id(), - originalAttribute.synthetic() - ) - ); - } - } - - Project projectWithGeneratingChild = project.replaceChild(generatingPlanWithRenamedAttributes.replaceChild(project.child())); - return projectWithGeneratingChild.withProjections( - mergeOutputExpressions(generatedAttributesRenamedToOriginal, projectWithGeneratingChild.projections()) - ); - } - - return generatingPlan; - } - - private record AttributeReplacement(List rewrittenExpressions, AttributeMap replacedAttributes) {}; - - /** - * Replace attributes in the given expressions by assigning them temporary names. - * Returns the rewritten expressions and a map with an alias for each replaced attribute; the rewritten expressions reference - * these aliases. 
- */ - private static AttributeReplacement renameAttributesInExpressions( - Set attributeNamesToRename, - List expressions - ) { - AttributeMap aliasesForReplacedAttributes = new AttributeMap<>(); - List rewrittenExpressions = new ArrayList<>(); - - for (Expression expr : expressions) { - rewrittenExpressions.add(expr.transformUp(Attribute.class, attr -> { - if (attributeNamesToRename.contains(attr.name())) { - Alias renamedAttribute = aliasesForReplacedAttributes.computeIfAbsent(attr, a -> { - String tempName = locallyUniqueTemporaryName(a.name(), "temp_name"); - return new Alias(a.source(), tempName, a, null, true); - }); - return renamedAttribute.toAttribute(); - } - - return attr; - })); - } - - return new AttributeReplacement(rewrittenExpressions, aliasesForReplacedAttributes); - } - - private static Map newNamesForConflictingAttributes( - List potentiallyConflictingAttributes, - Set reservedNames - ) { - if (reservedNames.isEmpty()) { - return Map.of(); - } - - Map renameAttributeTo = new HashMap<>(); - for (Attribute attr : potentiallyConflictingAttributes) { - String name = attr.name(); - if (reservedNames.contains(name)) { - renameAttributeTo.putIfAbsent(name, locallyUniqueTemporaryName(name, "temp_name")); - } - } - - return renameAttributeTo; - } - - public static Project pushDownPastProject(UnaryPlan parent) { - if (parent.child() instanceof Project project) { - UnaryPlan expressionsWithResolvedAliases = resolveRenamesFromProject(parent, project); - - return project.replaceChild(expressionsWithResolvedAliases.replaceChild(project.child())); - } else { - throw new EsqlIllegalArgumentException("Expected child to be instance of Project"); - } - } - - private static UnaryPlan resolveRenamesFromProject(UnaryPlan plan, Project project) { - AttributeMap.Builder aliasBuilder = AttributeMap.builder(); - project.forEachExpression(Alias.class, a -> aliasBuilder.put(a.toAttribute(), a.child())); - var aliases = aliasBuilder.build(); - - return (UnaryPlan) 
plan.transformExpressionsOnly(ReferenceAttribute.class, r -> aliases.resolve(r, r)); - } - - public abstract static class ParameterizedOptimizerRule extends ParameterizedRule< - SubPlan, - LogicalPlan, - P> { - - public final LogicalPlan apply(LogicalPlan plan, P context) { - return plan.transformDown(typeToken(), t -> rule(t, context)); - } - - protected abstract LogicalPlan rule(SubPlan plan, P context); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index e95959d38f328..5e91425296822 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -9,12 +9,12 @@ import org.elasticsearch.xpack.esql.capabilities.Validatable; import org.elasticsearch.xpack.esql.common.Failures; -import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.DependencyConsistency; +import org.elasticsearch.xpack.esql.optimizer.rules.PlanConsistencyChecker; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; public final class LogicalVerifier { - private static final DependencyConsistency DEPENDENCY_CHECK = new DependencyConsistency<>(); + private static final PlanConsistencyChecker DEPENDENCY_CHECK = new PlanConsistencyChecker<>(); public static final LogicalVerifier INSTANCE = new LogicalVerifier(); private LogicalVerifier() {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java deleted file mode 100644 index 7808bcbd86545..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.optimizer; - -import org.elasticsearch.xpack.esql.common.Failures; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.NameId; -import org.elasticsearch.xpack.esql.plan.QueryPlan; - -import java.util.HashSet; -import java.util.Set; - -import static org.elasticsearch.xpack.esql.common.Failure.fail; - -class OptimizerRules { - - private OptimizerRules() {} - - static class DependencyConsistency

> { - - void checkPlan(P p, Failures failures) { - AttributeSet refs = p.references(); - AttributeSet input = p.inputSet(); - AttributeSet missing = refs.subtract(input); - // TODO: for Joins, we should probably check if the required fields from the left child are actually in the left child, not - // just any child (and analogously for the right child). - if (missing.isEmpty() == false) { - failures.add(fail(p, "Plan [{}] optimized incorrectly due to missing references {}", p.nodeString(), missing)); - } - - Set outputAttributeNames = new HashSet<>(); - Set outputAttributeIds = new HashSet<>(); - for (Attribute outputAttr : p.output()) { - if (outputAttributeNames.add(outputAttr.name()) == false || outputAttributeIds.add(outputAttr.id()) == false) { - failures.add( - fail( - p, - "Plan [{}] optimized incorrectly due to duplicate output attribute {}", - p.nodeString(), - outputAttr.toString() - ) - ); - } - } - } - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java index c669853d3357e..644bfa7b807ef 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java @@ -11,7 +11,7 @@ import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.util.ReflectionUtils; -import org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules.TransformDirection; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; public class PhysicalOptimizerRules { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java index d8d1668c092b0..03b9705fefc79 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java @@ -9,32 +9,16 @@ import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.common.Failure; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; -import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.rule.RuleExecutor; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.util.Holder; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; -import java.util.ArrayList; import java.util.Collection; import java.util.List; -import static java.lang.Boolean.FALSE; -import static java.lang.Boolean.TRUE; import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; /** * This class is part of the planner. Performs global (coordinator) optimization of the physical plan. 
Local (data-node) optimizations @@ -70,64 +54,4 @@ static List> initializeRules(boolean isOptimize protected Iterable> batches() { return rules; } - - /** - * Adds an explicit project to minimize the amount of attributes sent from the local plan to the coordinator. - * This is done here to localize the project close to the data source and simplify the upcoming field - * extraction. - */ - static class ProjectAwayColumns extends Rule { - - @Override - public PhysicalPlan apply(PhysicalPlan plan) { - Holder keepTraversing = new Holder<>(TRUE); - // Invariant: if we add a projection with these attributes after the current plan node, the plan remains valid - // and the overall output will not change. - Holder requiredAttributes = new Holder<>(plan.outputSet()); - - // This will require updating should we choose to have non-unary execution plans in the future. - return plan.transformDown(UnaryExec.class, currentPlanNode -> { - if (keepTraversing.get() == false) { - return currentPlanNode; - } - if (currentPlanNode instanceof ExchangeExec exec) { - keepTraversing.set(FALSE); - var child = exec.child(); - // otherwise expect a Fragment - if (child instanceof FragmentExec fragmentExec) { - var logicalFragment = fragmentExec.fragment(); - - // no need for projection when dealing with aggs - if (logicalFragment instanceof Aggregate == false) { - List output = new ArrayList<>(requiredAttributes.get()); - // if all the fields are filtered out, it's only the count that matters - // however until a proper fix (see https://github.com/elastic/elasticsearch/issues/98703) - // add a synthetic field (so it doesn't clash with the user defined one) to return a constant - // to avoid the block from being trimmed - if (output.isEmpty()) { - var alias = new Alias(logicalFragment.source(), "", Literal.NULL, null, true); - List fields = singletonList(alias); - logicalFragment = new Eval(logicalFragment.source(), logicalFragment, fields); - output = Expressions.asAttributes(fields); 
- } - // add a logical projection (let the local replanning remove it if needed) - FragmentExec newChild = new FragmentExec( - Source.EMPTY, - new Project(logicalFragment.source(), logicalFragment, output), - fragmentExec.esFilter(), - fragmentExec.estimatedRowSize(), - fragmentExec.reducer() - ); - return new ExchangeExec(exec.source(), output, exec.inBetweenAggs(), newChild); - } - } - } else { - AttributeSet childOutput = currentPlanNode.inputSet(); - AttributeSet addedAttributes = currentPlanNode.outputSet().subtract(childOutput); - requiredAttributes.set(requiredAttributes.get().subtract(addedAttributes).combine(currentPlanNode.references())); - } - return currentPlanNode; - }); - } - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java index f9ce83d5c1f15..8bd8aba01fd21 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java @@ -10,7 +10,7 @@ import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.DependencyConsistency; +import org.elasticsearch.xpack.esql.optimizer.rules.PlanConsistencyChecker; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -24,7 +24,7 @@ public final class PhysicalVerifier { public static final PhysicalVerifier INSTANCE = new PhysicalVerifier(); - private static final DependencyConsistency DEPENDENCY_CHECK = new DependencyConsistency<>(); + private static final PlanConsistencyChecker DEPENDENCY_CHECK = new PlanConsistencyChecker<>(); private PhysicalVerifier() {} diff 
--git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java new file mode 100644 index 0000000000000..30de8945a4c20 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.xpack.esql.common.Failures; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.plan.QueryPlan; + +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.xpack.esql.common.Failure.fail; + +public class PlanConsistencyChecker

> { + + /** + * Check whether a single {@link QueryPlan} produces no duplicate attributes and its children provide all of its required + * {@link QueryPlan#references() references}. Otherwise, add + * {@link org.elasticsearch.xpack.esql.common.Failure Failure}s to the {@link Failures} object. + */ + public void checkPlan(P p, Failures failures) { + AttributeSet refs = p.references(); + AttributeSet input = p.inputSet(); + AttributeSet missing = refs.subtract(input); + // TODO: for Joins, we should probably check if the required fields from the left child are actually in the left child, not + // just any child (and analogously for the right child). + if (missing.isEmpty() == false) { + failures.add(fail(p, "Plan [{}] optimized incorrectly due to missing references {}", p.nodeString(), missing)); + } + + Set outputAttributeNames = new HashSet<>(); + Set outputAttributeIds = new HashSet<>(); + for (Attribute outputAttr : p.output()) { + if (outputAttributeNames.add(outputAttr.name()) == false || outputAttributeIds.add(outputAttr.id()) == false) { + failures.add( + fail(p, "Plan [{}] optimized incorrectly due to duplicate output attribute {}", p.nodeString(), outputAttr.toString()) + ); + } + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/AddDefaultTopN.java similarity index 87% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/AddDefaultTopN.java index 9208eba740100..02815d45d2896 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/AddDefaultTopN.java @@ -5,12 +5,11 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.OrderBy; @@ -39,7 +38,10 @@ * OR if there is no sort between "limit" and "mv_expand". * But, since this type of query has such a filter, the "sort emp_no" will have no limit when it reaches the current rule. */ -public final class AddDefaultTopN extends LogicalPlanOptimizer.ParameterizedOptimizerRule { +public final class AddDefaultTopN extends OptimizerRules.ParameterizedOptimizerRule { + public AddDefaultTopN() { + super(OptimizerRules.TransformDirection.DOWN); + } @Override protected LogicalPlan rule(LogicalPlan plan, LogicalOptimizerContext context) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanFunctionEqualsElimination.java similarity index 96% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanFunctionEqualsElimination.java index 1cdc2c02c8469..3152f9b574767 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanFunctionEqualsElimination.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.Function; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanSimplification.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanSimplification.java index 6cb5bb29b5dd4..73d1ea1fb6e8f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanSimplification.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineBinaryComparisons.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineBinaryComparisons.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineBinaryComparisons.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineBinaryComparisons.java index 0d1d5baf920d7..3f47c74aaf814 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineBinaryComparisons.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineBinaryComparisons.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineDisjunctions.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctions.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineDisjunctions.java index ceac1aa9ca75b..5cb377de47efc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctions.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineDisjunctions.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineEvals.java similarity index 94% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineEvals.java index f8210d06e4439..7afe33d8daecc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineEvals.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.plan.logical.Eval; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java index 3c0ac9056c8c5..64c32367d0d57 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Alias; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConstantFolding.java similarity index 91% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConstantFolding.java index 2178013c42148..82fe2c6bddf50 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConstantFolding.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConvertStringToByteRef.java similarity index 95% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConvertStringToByteRef.java index a1969df3f898a..0604750883f14 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConvertStringToByteRef.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java index ab1dc407a7a4a..8985f4ab24705 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/DuplicateLimitAfterMvExpand.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java similarity index 95% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java index 0e864c13ca6aa..0561865213a1b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRight.java similarity index 92% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRight.java index 36d39e0ee1c73..d96c73d5ee4f1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRight.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/OptimizerRules.java similarity index 75% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/OptimizerRules.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/OptimizerRules.java index 6bc0d9016eb9f..f087fab06828e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/OptimizerRules.java @@ -4,9 +4,10 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.util.ReflectionUtils; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -69,4 +70,24 @@ public enum TransformDirection { UP, DOWN } + + public abstract static class ParameterizedOptimizerRule extends ParameterizedRule< + SubPlan, + LogicalPlan, + P> { + + private final TransformDirection direction; + + protected ParameterizedOptimizerRule(TransformDirection direction) { + this.direction = direction; + } + + public final LogicalPlan apply(LogicalPlan plan, P context) { + return direction == TransformDirection.DOWN + ? 
plan.transformDown(typeToken(), t -> rule(t, context)) + : plan.transformUp(typeToken(), t -> rule(t, context)); + } + + protected abstract LogicalPlan rule(SubPlan plan, P context); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PartiallyFoldCase.java similarity index 82% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PartiallyFoldCase.java index 78435f852982e..118e4fc170520 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PartiallyFoldCase.java @@ -5,12 +5,12 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; -import static org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection.DOWN; +import static org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules.TransformDirection.DOWN; /** * Fold the arms of {@code CASE} statements. 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEmptyRelation.java similarity index 88% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEmptyRelation.java index c57e490423ce8..8437b79454884 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEmptyRelation.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; @@ -36,9 +35,9 @@ protected LogicalPlan rule(UnaryPlan plan) { // only care about non-grouped aggs might return something (count) if (plan instanceof Aggregate agg && agg.groupings().isEmpty()) { List emptyBlocks = aggsFromEmpty(agg.aggregates()); - p = LogicalPlanOptimizer.skipPlan(plan, LocalSupplier.of(emptyBlocks.toArray(Block[]::new))); + p = replacePlanByRelation(plan, LocalSupplier.of(emptyBlocks.toArray(Block[]::new))); } else { - p = LogicalPlanOptimizer.skipPlan(plan); + p = 
PruneEmptyPlans.skipPlan(plan); } } return p; @@ -69,4 +68,8 @@ protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFa wrapper.accept(value); blocks.add(wrapper.builder().build()); } + + private static LogicalPlan replacePlanByRelation(UnaryPlan plan, LocalSupplier supplier) { + return new LocalRelation(plan.source(), plan.output(), supplier); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEquals.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEquals.java index 8e5d203942c7a..0bd98db1e1d7a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEquals.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEvalFoldables.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEvalFoldables.java index 9231105c9b663..139f192d3c14e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEvalFoldables.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateNullable.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateNullable.java index a56f0633fe286..738ca83b47e42 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateNullable.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneColumns.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneColumns.java index baeabb534aa3c..62f4e391f13ec 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneColumns.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockUtils; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneEmptyPlans.java similarity index 56% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneEmptyPlans.java index 739d59d8b0df6..afd2b4e05493b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneEmptyPlans.java @@ -5,16 +5,21 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; public final class PruneEmptyPlans extends OptimizerRules.OptimizerRule { + public static LogicalPlan skipPlan(UnaryPlan plan) { + return new LocalRelation(plan.source(), plan.output(), LocalSupplier.EMPTY); + } + @Override protected LogicalPlan rule(UnaryPlan plan) { - return plan.output().isEmpty() ? LogicalPlanOptimizer.skipPlan(plan) : plan; + return plan.output().isEmpty() ? 
skipPlan(plan) : plan; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneFilters.java similarity index 93% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneFilters.java index 7e9ff7c5f5f02..b6f7ac9e464f4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneFilters.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -31,7 +30,7 @@ protected LogicalPlan rule(Filter filter) { return filter.child(); } if (FALSE.equals(condition) || Expressions.isNull(condition)) { - return LogicalPlanOptimizer.skipPlan(filter); + return PruneEmptyPlans.skipPlan(filter); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneLiteralsInOrderBy.java similarity index 95% rename from 
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneLiteralsInOrderBy.java index 73273cbf17d92..1b9cabeb1fae5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneLiteralsInOrderBy.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneOrderByBeforeStats.java similarity index 96% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneOrderByBeforeStats.java index f2ef524f2c91e..24fb8971487d5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneOrderByBeforeStats.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneRedundantSortClauses.java similarity index 94% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneRedundantSortClauses.java index 33918d29d5ded..a571aa5de7d0b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneRedundantSortClauses.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.ExpressionSet; import org.elasticsearch.xpack.esql.expression.Order; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java similarity index 96% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java index 48013e113fe43..e39b590228d57 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; @@ -59,7 +58,7 @@ protected LogicalPlan rule(Filter filter) { var attributes = new AttributeSet(Expressions.asAttributes(enrich.enrichFields())); plan = maybePushDownPastUnary(filter, enrich, attributes::contains); } else if (child instanceof Project) { - return LogicalPlanOptimizer.pushDownPastProject(filter); + return PushDownUtils.pushDownPastProject(filter); } else if (child instanceof OrderBy orderBy) { // swap the filter with its child plan = orderBy.replaceChild(filter.with(orderBy.child(), condition)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java index 62ecf9ccd09be..08f32b094a95a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineOrderBy.java similarity index 82% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineOrderBy.java index 286695abda25b..c0c78b05e1ce3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineOrderBy.java @@ -5,9 +5,8 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; @@ -21,7 +20,7 @@ protected LogicalPlan rule(OrderBy orderBy) { // combine orders return new OrderBy(orderBy.source(), childOrder.child(), orderBy.order()); } else if (child instanceof Project) { - return LogicalPlanOptimizer.pushDownPastProject(orderBy); + return PushDownUtils.pushDownPastProject(orderBy); } return orderBy; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownEnrich.java similarity index 72% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownEnrich.java index 5e6def37cbf04..4e370078cf45c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownEnrich.java @@ -5,15 +5,14 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; public final class PushDownEnrich extends OptimizerRules.OptimizerRule { @Override protected LogicalPlan rule(Enrich en) { - return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(en); + return PushDownUtils.pushGeneratingPlanPastProjectAndOrderBy(en); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownEval.java similarity index 71% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownEval.java index 1f5fd072f267c..bd8aac47c47c3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownEval.java @@ -5,15 +5,14 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; public final class PushDownEval extends OptimizerRules.OptimizerRule { @Override protected LogicalPlan rule(Eval eval) { - return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(eval); + return PushDownUtils.pushGeneratingPlanPastProjectAndOrderBy(eval); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownRegexExtract.java similarity index 72% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownRegexExtract.java index 3f64f47e11879..e58ed1315c034 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownRegexExtract.java @@ -5,15 +5,14 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; public final class PushDownRegexExtract extends OptimizerRules.OptimizerRule { @Override protected LogicalPlan rule(RegexExtract re) { - return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(re); + return PushDownUtils.pushGeneratingPlanPastProjectAndOrderBy(re); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownUtils.java new file mode 100644 index 0000000000000..6c8caaf783f43 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownUtils.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeMap; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; +import org.elasticsearch.xpack.esql.expression.Order; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; +import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; + +class PushDownUtils { + /** + * Pushes LogicalPlans which generate new attributes (Eval, Grok/Dissect, Enrich), past OrderBys and Projections. + * Although it seems arbitrary whether the OrderBy or the generating plan is executed first, this transformation ensures that OrderBys + * only separated by e.g. an Eval can be combined by {@link PushDownAndCombineOrderBy}. + *

+ * E.g. {@code ... | sort a | eval x = b + 1 | sort x} becomes {@code ... | eval x = b + 1 | sort a | sort x} + *

+ * Ordering the generating plans before the OrderBys has the advantage that it's always possible to order the plans like this. + * E.g., in the example above it would not be possible to put the eval after the two orderBys. + *

+ * In case one of the generating plan's attributes would shadow the OrderBy's attributes, we alias the generated attribute first. + *

+ * E.g. {@code ... | sort a | eval a = b + 1 | ...} becomes {@code ... | eval $$a = a | eval a = b + 1 | sort $$a | drop $$a ...} + *

+ * In case the generating plan's attributes would shadow the Project's attributes, we rename the generated attributes in place. + *

+ * E.g. {@code ... | rename a as z | eval a = b + 1 | ...} becomes {@code ... eval $$a = b + 1 | rename a as z, $$a as a ...} + */ + public static > LogicalPlan pushGeneratingPlanPastProjectAndOrderBy(Plan generatingPlan) { + LogicalPlan child = generatingPlan.child(); + if (child instanceof OrderBy orderBy) { + Set evalFieldNames = new LinkedHashSet<>(Expressions.names(generatingPlan.generatedAttributes())); + + // Look for attributes in the OrderBy's expressions and create aliases with temporary names for them. + AttributeReplacement nonShadowedOrders = renameAttributesInExpressions(evalFieldNames, orderBy.order()); + + AttributeMap aliasesForShadowedOrderByAttrs = nonShadowedOrders.replacedAttributes; + @SuppressWarnings("unchecked") + List newOrder = (List) (List) nonShadowedOrders.rewrittenExpressions; + + if (aliasesForShadowedOrderByAttrs.isEmpty() == false) { + List newAliases = new ArrayList<>(aliasesForShadowedOrderByAttrs.values()); + + LogicalPlan plan = new Eval(orderBy.source(), orderBy.child(), newAliases); + plan = generatingPlan.replaceChild(plan); + plan = new OrderBy(orderBy.source(), plan, newOrder); + plan = new Project(generatingPlan.source(), plan, generatingPlan.output()); + + return plan; + } + + return orderBy.replaceChild(generatingPlan.replaceChild(orderBy.child())); + } else if (child instanceof Project project) { + // We need to account for attribute shadowing: a rename might rely on a name generated in an Eval/Grok/Dissect/Enrich. + // E.g. in: + // + // Eval[[2 * x{f}#1 AS y]] + // \_Project[[x{f}#1, y{f}#2, y{f}#2 AS z]] + // + // Just moving the Eval down breaks z because we shadow y{f}#2. 
+ // Instead, we use a different alias in the Eval, eventually renaming back to y: + // + // Project[[x{f}#1, y{f}#2 as z, $$y{r}#3 as y]] + // \_Eval[[2 * x{f}#1 as $$y]] + + List generatedAttributes = generatingPlan.generatedAttributes(); + + @SuppressWarnings("unchecked") + Plan generatingPlanWithResolvedExpressions = (Plan) resolveRenamesFromProject(generatingPlan, project); + + Set namesReferencedInRenames = new HashSet<>(); + for (NamedExpression ne : project.projections()) { + if (ne instanceof Alias as) { + namesReferencedInRenames.addAll(as.child().references().names()); + } + } + Map renameGeneratedAttributeTo = newNamesForConflictingAttributes( + generatingPlan.generatedAttributes(), + namesReferencedInRenames + ); + List newNames = generatedAttributes.stream() + .map(attr -> renameGeneratedAttributeTo.getOrDefault(attr.name(), attr.name())) + .toList(); + Plan generatingPlanWithRenamedAttributes = generatingPlanWithResolvedExpressions.withGeneratedNames(newNames); + + // Put the project at the top, but include the generated attributes. + // Any generated attributes that had to be renamed need to be re-renamed to their original names. 
+ List generatedAttributesRenamedToOriginal = new ArrayList<>(generatedAttributes.size()); + List renamedGeneratedAttributes = generatingPlanWithRenamedAttributes.generatedAttributes(); + for (int i = 0; i < generatedAttributes.size(); i++) { + Attribute originalAttribute = generatedAttributes.get(i); + Attribute renamedAttribute = renamedGeneratedAttributes.get(i); + if (originalAttribute.name().equals(renamedAttribute.name())) { + generatedAttributesRenamedToOriginal.add(renamedAttribute); + } else { + generatedAttributesRenamedToOriginal.add( + new Alias( + originalAttribute.source(), + originalAttribute.name(), + renamedAttribute, + originalAttribute.id(), + originalAttribute.synthetic() + ) + ); + } + } + + Project projectWithGeneratingChild = project.replaceChild(generatingPlanWithRenamedAttributes.replaceChild(project.child())); + return projectWithGeneratingChild.withProjections( + mergeOutputExpressions(generatedAttributesRenamedToOriginal, projectWithGeneratingChild.projections()) + ); + } + + return generatingPlan; + } + + /** + * Replace attributes in the given expressions by assigning them temporary names. + * Returns the rewritten expressions and a map with an alias for each replaced attribute; the rewritten expressions reference + * these aliases. 
+ */ + private static AttributeReplacement renameAttributesInExpressions( + Set attributeNamesToRename, + List expressions + ) { + AttributeMap aliasesForReplacedAttributes = new AttributeMap<>(); + List rewrittenExpressions = new ArrayList<>(); + + for (Expression expr : expressions) { + rewrittenExpressions.add(expr.transformUp(Attribute.class, attr -> { + if (attributeNamesToRename.contains(attr.name())) { + Alias renamedAttribute = aliasesForReplacedAttributes.computeIfAbsent(attr, a -> { + String tempName = TemporaryNameUtils.locallyUniqueTemporaryName(a.name(), "temp_name"); + return new Alias(a.source(), tempName, a, null, true); + }); + return renamedAttribute.toAttribute(); + } + + return attr; + })); + } + + return new AttributeReplacement(rewrittenExpressions, aliasesForReplacedAttributes); + } + + private static Map newNamesForConflictingAttributes( + List potentiallyConflictingAttributes, + Set reservedNames + ) { + if (reservedNames.isEmpty()) { + return Map.of(); + } + + Map renameAttributeTo = new HashMap<>(); + for (Attribute attr : potentiallyConflictingAttributes) { + String name = attr.name(); + if (reservedNames.contains(name)) { + renameAttributeTo.putIfAbsent(name, TemporaryNameUtils.locallyUniqueTemporaryName(name, "temp_name")); + } + } + + return renameAttributeTo; + } + + public static Project pushDownPastProject(UnaryPlan parent) { + if (parent.child() instanceof Project project) { + UnaryPlan expressionsWithResolvedAliases = resolveRenamesFromProject(parent, project); + + return project.replaceChild(expressionsWithResolvedAliases.replaceChild(project.child())); + } else { + throw new EsqlIllegalArgumentException("Expected child to be instance of Project"); + } + } + + private static UnaryPlan resolveRenamesFromProject(UnaryPlan plan, Project project) { + AttributeMap.Builder aliasBuilder = AttributeMap.builder(); + project.forEachExpression(Alias.class, a -> aliasBuilder.put(a.toAttribute(), a.child())); + var aliases = 
aliasBuilder.build(); + + return (UnaryPlan) plan.transformExpressionsOnly(ReferenceAttribute.class, r -> aliases.resolve(r, r)); + } + + private record AttributeReplacement(List rewrittenExpressions, AttributeMap replacedAttributes) {} +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java index 0f8e0f450e585..ad424f6882d26 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/RemoveStatsOverride.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.esql.analysis.AnalyzerRules; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAliasingEvalWithProject.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAliasingEvalWithProject.java index 34b75cd89f68c..e57a95f0f7dad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAliasingEvalWithProject.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceLimitAndSortAsTopN.java similarity index 93% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceLimitAndSortAsTopN.java index 6394d11bb68c8..7d44fa1fda5a2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceLimitAndSortAsTopN.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceLookupWithJoin.java similarity index 93% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceLookupWithJoin.java index f258ea97bfa33..09ed113de1622 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceLookupWithJoin.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceOrderByExpressionWithEval.java similarity index 96% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceOrderByExpressionWithEval.java index 9bb28f38ea65f..3ea469781ae78 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceOrderByExpressionWithEval.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java similarity index 96% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java index cc18940e68924..b462a99bc5e14 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsAggExpressionWithEval.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsAggExpressionWithEval.java index ea0a302f7131d..d74811518624a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsAggExpressionWithEval.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.common.util.Maps; import org.elasticsearch.xpack.esql.core.expression.Alias; @@ -16,7 +16,6 @@ import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -143,6 +142,6 @@ protected LogicalPlan rule(Aggregate aggregate) { } static String syntheticName(Expression expression, Expression af, int counter) { - return LogicalPlanOptimizer.temporaryName(expression, af, counter); + return TemporaryNameUtils.temporaryName(expression, af, counter); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsNestedExpressionWithEval.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsNestedExpressionWithEval.java index 02b39f6babef0..c3eff15bcec9e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsNestedExpressionWithEval.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Stats; @@ -152,6 +151,6 @@ private LogicalPlan rule(Stats aggregate) { } static String syntheticName(Expression expression, AggregateFunction af, int counter) { - return LogicalPlanOptimizer.temporaryName(expression, af, counter); + return TemporaryNameUtils.temporaryName(expression, af, counter); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceTrivialTypeConversions.java similarity index 95% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceTrivialTypeConversions.java index dc877a99010f8..8cb977f97b4b3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceTrivialTypeConversions.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SetAsOptimized.java similarity index 92% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SetAsOptimized.java index 89d2e7613d2c7..c9a2b44e40ebf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SetAsOptimized.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SimplifyComparisonsArithmetics.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SimplifyComparisonsArithmetics.java index fe83aeb647bf9..d3a9970896c16 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SimplifyComparisonsArithmetics.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SkipQueryOnEmptyMappings.java similarity index 92% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SkipQueryOnEmptyMappings.java index 99efacd4ea39a..a8672b64c8b98 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SkipQueryOnEmptyMappings.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SkipQueryOnLimitZero.java similarity index 78% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SkipQueryOnLimitZero.java index 199520d648a26..5d98d941bb207 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/SkipQueryOnLimitZero.java @@ -5,9 +5,8 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; -import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -16,7 +15,7 @@ public final class SkipQueryOnLimitZero extends OptimizerRules.OptimizerRule TO_STRING_LIMIT ? 
string.substring(0, TO_STRING_LIMIT - 1) + ">" : string; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TranslateMetricsAggregate.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TranslateMetricsAggregate.java index f2926fe92ca3f..2879db5042f4a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TranslateMetricsAggregate.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferIsNotNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferIsNotNull.java new file mode 100644 index 0000000000000..81ae81bbba7b7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferIsNotNull.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical.local; + +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeMap; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.rule.Rule; +import org.elasticsearch.xpack.esql.core.util.CollectionUtils; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.LinkedHashSet; +import java.util.Set; + +import static java.util.Collections.emptySet; + +/** + * Simplify IsNotNull targets by resolving the underlying expression to its root fields with unknown + * nullability. + * e.g. + * (x + 1) / 2 IS NOT NULL --> x IS NOT NULL AND (x+1) / 2 IS NOT NULL + * SUBSTRING(x, 3) > 4 IS NOT NULL --> x IS NOT NULL AND SUBSTRING(x, 3) > 4 IS NOT NULL + * When dealing with multiple fields, a conjunction/disjunction based on the predicate: + * (x + y) / 4 IS NOT NULL --> x IS NOT NULL AND y IS NOT NULL AND (x + y) / 4 IS NOT NULL + * This handles the case of fields nested inside functions or expressions in order to avoid: + * - having to evaluate the whole expression + * - not pushing down the filter due to expression evaluation + * IS NULL cannot be simplified since it leads to a disjunction which prevents the filter to be + * pushed down: + * (x + 1) IS NULL --> x IS NULL OR x + 1 IS NULL + * and x IS NULL cannot be pushed down + *
+ * Implementation-wise this rule goes bottom-up, keeping an alias up to date to the current plan + * and then looks for replacing the target. + */ +public class InferIsNotNull extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + // the alias map is shared across the whole plan + AttributeMap aliases = new AttributeMap<>(); + // traverse bottom-up to pick up the aliases as we go + plan = plan.transformUp(p -> inspectPlan(p, aliases)); + return plan; + } + + private LogicalPlan inspectPlan(LogicalPlan plan, AttributeMap aliases) { + // inspect just this plan properties + plan.forEachExpression(Alias.class, a -> aliases.put(a.toAttribute(), a.child())); + // now go about finding isNull/isNotNull + LogicalPlan newPlan = plan.transformExpressionsOnlyUp(IsNotNull.class, inn -> inferNotNullable(inn, aliases)); + return newPlan; + } + + private Expression inferNotNullable(IsNotNull inn, AttributeMap aliases) { + Expression result = inn; + Set refs = resolveExpressionAsRootAttributes(inn.field(), aliases); + // no refs found or could not detect - return the original function + if (refs.size() > 0) { + // add IsNull for the filters along with the initial inn + var innList = CollectionUtils.combine(refs.stream().map(r -> (Expression) new IsNotNull(inn.source(), r)).toList(), inn); + result = Predicates.combineAnd(innList); + } + return result; + } + + /** + * Unroll the expression to its references to get to the root fields + * that really matter for filtering. + */ + protected Set resolveExpressionAsRootAttributes(Expression exp, AttributeMap aliases) { + Set resolvedExpressions = new LinkedHashSet<>(); + boolean changed = doResolve(exp, aliases, resolvedExpressions); + return changed ? 
resolvedExpressions : emptySet(); + } + + private boolean doResolve(Expression exp, AttributeMap aliases, Set resolvedExpressions) { + boolean changed = false; + // check if the expression can be skipped or is not nullabe + if (skipExpression(exp)) { + resolvedExpressions.add(exp); + } else { + for (Expression e : exp.references()) { + Expression resolved = aliases.resolve(e, e); + // found a root attribute, bail out + if (resolved instanceof Attribute a && resolved == e) { + resolvedExpressions.add(a); + // don't mark things as change if the original expression hasn't been broken down + changed |= resolved != exp; + } else { + // go further + changed |= doResolve(resolved, aliases, resolvedExpressions); + } + } + } + return changed; + } + + private static boolean skipExpression(Expression e) { + return e instanceof Coalesce; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferNonNullAggConstraint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferNonNullAggConstraint.java new file mode 100644 index 0000000000000..35bb4e1dc082f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/InferNonNullAggConstraint.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.optimizer.rules.logical.local;

import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.xpack.esql.core.expression.Alias;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates;
import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull;
import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction;
import org.elasticsearch.xpack.esql.optimizer.LocalLogicalOptimizerContext;
import org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules;
import org.elasticsearch.xpack.esql.plan.logical.Aggregate;
import org.elasticsearch.xpack.esql.plan.logical.Filter;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.stats.SearchStats;

import java.util.Set;

/**
 * The vast majority of aggs ignore null entries - this rule adds a pushable filter, as it is cheap
 * to execute, to filter these entries out to begin with.
 * STATS x = min(a), y = sum(b)
 * becomes
 * | WHERE a IS NOT NULL OR b IS NOT NULL
 * | STATS x = min(a), y = sum(b)
 * <p>
 * Unfortunately this optimization cannot be applied when grouping is necessary since it can filter out
 * groups containing only null values
 */
public class InferNonNullAggConstraint extends OptimizerRules.ParameterizedOptimizerRule<Aggregate, LocalLogicalOptimizerContext> {
    public InferNonNullAggConstraint() {
        super(OptimizerRules.TransformDirection.UP);
    }

    @Override
    protected LogicalPlan rule(Aggregate aggregate, LocalLogicalOptimizerContext context) {
        // only look at aggregates with default grouping
        if (aggregate.groupings().size() > 0) {
            return aggregate;
        }

        SearchStats stats = context.searchStats();
        LogicalPlan plan = aggregate;
        var aggs = aggregate.aggregates();
        Set<Expression> nonNullAggFields = Sets.newLinkedHashSetWithExpectedSize(aggs.size());
        for (var agg : aggs) {
            if (Alias.unwrap(agg) instanceof AggregateFunction af) {
                Expression field = af.field();
                // ignore literals (e.g. COUNT(1))
                // make sure the field exists at the source and is indexed (not runtime)
                if (field.foldable() == false && field instanceof FieldAttribute fa && stats.isIndexed(fa.name())) {
                    nonNullAggFields.add(field);
                } else {
                    // otherwise bail out since unless disjunction needs to cover _all_ fields, things get filtered out
                    return plan;
                }
            }
        }

        if (nonNullAggFields.size() > 0) {
            // OR the IS NOT NULL checks together — a row is useful if any aggregated field is non-null
            Expression condition = Predicates.combineOr(
                nonNullAggFields.stream().map(f -> (Expression) new IsNotNull(aggregate.source(), f)).toList()
            );
            plan = aggregate.replaceChild(new Filter(aggregate.source(), aggregate.child(), condition));
        }
        return plan;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.optimizer.rules.logical.local;

import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.BlockUtils;
import org.elasticsearch.xpack.esql.core.expression.Attribute;
import org.elasticsearch.xpack.esql.core.expression.NamedExpression;
import org.elasticsearch.xpack.esql.core.type.DataType;
import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction;
import org.elasticsearch.xpack.esql.expression.function.aggregate.Count;
import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEmptyRelation;
import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders;
import org.elasticsearch.xpack.esql.planner.PlannerUtils;

import java.util.List;

/**
 * Local aggregation can only produce intermediate state that gets wired into the global agg.
 */
public class LocalPropagateEmptyRelation extends PropagateEmptyRelation {

    /**
     * Local variant of the aggregation that returns the intermediate value.
     * Emits one single-row block per intermediate attribute of the agg.
     */
    @Override
    protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFactory blockFactory, List<Block> blocks) {
        List<Attribute> output = AbstractPhysicalOperationProviders.intermediateAttributes(List.of(agg), List.of());
        for (Attribute o : output) {
            DataType dataType = o.dataType();
            // boolean right now is used for the internal #seen so always return true
            var value = dataType == DataType.BOOLEAN ? true
                // look for count(literal) with literal != null
                : aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null) ? 0L
                // otherwise nullify
                : null;
            var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(dataType), 1);
            wrapper.accept(value);
            blocks.add(wrapper.builder().build());
        }
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.optimizer.rules.logical.local;

import org.elasticsearch.common.util.Maps;
import org.elasticsearch.xpack.esql.core.expression.Alias;
import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
import org.elasticsearch.xpack.esql.core.expression.Literal;
import org.elasticsearch.xpack.esql.core.expression.NamedExpression;
import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule;
import org.elasticsearch.xpack.esql.core.type.DataType;
import org.elasticsearch.xpack.esql.optimizer.LocalLogicalOptimizerContext;
import org.elasticsearch.xpack.esql.plan.logical.Aggregate;
import org.elasticsearch.xpack.esql.plan.logical.EsRelation;
import org.elasticsearch.xpack.esql.plan.logical.Eval;
import org.elasticsearch.xpack.esql.plan.logical.Filter;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.plan.logical.OrderBy;
import org.elasticsearch.xpack.esql.plan.logical.Project;
import org.elasticsearch.xpack.esql.plan.logical.RegexExtract;
import org.elasticsearch.xpack.esql.plan.logical.TopN;
import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation;
import org.elasticsearch.xpack.esql.stats.SearchStats;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * Look for any fields used in the plan that are missing locally and replace them with null.
 * This should minimize the plan execution, in the best scenario skipping its execution altogether.
 */
public class ReplaceMissingFieldWithNull extends ParameterizedRule<LogicalPlan, LogicalPlan, LocalLogicalOptimizerContext> {

    @Override
    public LogicalPlan apply(LogicalPlan plan, LocalLogicalOptimizerContext localLogicalOptimizerContext) {
        return plan.transformUp(p -> missingToNull(p, localLogicalOptimizerContext.searchStats()));
    }

    /**
     * Replace locally-missing field references in {@code plan} with null literals.
     * Projections keep their aliased name; one null alias is created per data type and reused.
     */
    private LogicalPlan missingToNull(LogicalPlan plan, SearchStats stats) {
        if (plan instanceof EsRelation || plan instanceof LocalRelation) {
            return plan;
        }

        if (plan instanceof Aggregate a) {
            // don't do anything (for now)
            return a;
        }
        // keep the aliased name
        else if (plan instanceof Project project) {
            var projections = project.projections();
            List<NamedExpression> newProjections = new ArrayList<>(projections.size());
            Map<DataType, Alias> nullLiteral = Maps.newLinkedHashMapWithExpectedSize(DataType.types().size());

            for (NamedExpression projection : projections) {
                // Do not use the attribute name, this can deviate from the field name for union types.
                if (projection instanceof FieldAttribute f && stats.exists(f.fieldName()) == false) {
                    DataType dt = f.dataType();
                    Alias nullAlias = nullLiteral.get(f.dataType());
                    // save the first field as null (per datatype)
                    if (nullAlias == null) {
                        Alias alias = new Alias(f.source(), f.name(), Literal.of(f, null), f.id());
                        nullLiteral.put(dt, alias);
                        projection = alias.toAttribute();
                    }
                    // otherwise point to it
                    else {
                        // since avoids creating field copies
                        projection = new Alias(f.source(), f.name(), nullAlias.toAttribute(), f.id());
                    }
                }

                newProjections.add(projection);
            }
            // add the first found field as null
            if (nullLiteral.size() > 0) {
                plan = new Eval(project.source(), project.child(), new ArrayList<>(nullLiteral.values()));
                plan = new Project(project.source(), plan, newProjections);
            }
        } else if (plan instanceof Eval
            || plan instanceof Filter
            || plan instanceof OrderBy
            || plan instanceof RegexExtract
            || plan instanceof TopN) {
                plan = plan.transformExpressionsOnlyUp(
                    FieldAttribute.class,
                    // Do not use the attribute name, this can deviate from the field name for union types.
                    f -> stats.exists(f.fieldName()) ? f : Literal.of(f, null)
                );
            }

        return plan;
    }
}
+ */ +public class ReplaceTopNWithLimitAndSort extends OptimizerRules.OptimizerRule { + public ReplaceTopNWithLimitAndSort() { + super(UP); + } + + @Override + protected LogicalPlan rule(TopN plan) { + return new Limit(plan.source(), plan.limit(), new OrderBy(plan.source(), plan.child(), plan.order())); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java new file mode 100644 index 0000000000000..bee27acd06ec0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.optimizer.rules.physical;

import org.elasticsearch.xpack.esql.core.expression.Alias;
import org.elasticsearch.xpack.esql.core.expression.Attribute;
import org.elasticsearch.xpack.esql.core.expression.AttributeSet;
import org.elasticsearch.xpack.esql.core.expression.Expressions;
import org.elasticsearch.xpack.esql.core.expression.Literal;
import org.elasticsearch.xpack.esql.core.rule.Rule;
import org.elasticsearch.xpack.esql.core.tree.Source;
import org.elasticsearch.xpack.esql.core.util.Holder;
import org.elasticsearch.xpack.esql.plan.logical.Aggregate;
import org.elasticsearch.xpack.esql.plan.logical.Eval;
import org.elasticsearch.xpack.esql.plan.logical.Project;
import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec;
import org.elasticsearch.xpack.esql.plan.physical.FragmentExec;
import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
import org.elasticsearch.xpack.esql.plan.physical.UnaryExec;

import java.util.ArrayList;
import java.util.List;

import static java.lang.Boolean.FALSE;
import static java.lang.Boolean.TRUE;
import static java.util.Collections.singletonList;

/**
 * Adds an explicit project to minimize the amount of attributes sent from the local plan to the coordinator.
 * This is done here to localize the project close to the data source and simplify the upcoming field
 * extraction.
 */
public class ProjectAwayColumns extends Rule<PhysicalPlan, PhysicalPlan> {

    @Override
    public PhysicalPlan apply(PhysicalPlan plan) {
        Holder<Boolean> keepTraversing = new Holder<>(TRUE);
        // Invariant: if we add a projection with these attributes after the current plan node, the plan remains valid
        // and the overall output will not change.
        Holder<AttributeSet> requiredAttributes = new Holder<>(plan.outputSet());

        // This will require updating should we choose to have non-unary execution plans in the future.
        return plan.transformDown(UnaryExec.class, currentPlanNode -> {
            if (keepTraversing.get() == false) {
                return currentPlanNode;
            }
            if (currentPlanNode instanceof ExchangeExec exec) {
                keepTraversing.set(FALSE);
                var child = exec.child();
                // otherwise expect a Fragment
                if (child instanceof FragmentExec fragmentExec) {
                    var logicalFragment = fragmentExec.fragment();

                    // no need for projection when dealing with aggs
                    if (logicalFragment instanceof Aggregate == false) {
                        List<Attribute> output = new ArrayList<>(requiredAttributes.get());
                        // if all the fields are filtered out, it's only the count that matters
                        // however until a proper fix (see https://github.com/elastic/elasticsearch/issues/98703)
                        // add a synthetic field (so it doesn't clash with the user defined one) to return a constant
                        // to avoid the block from being trimmed
                        if (output.isEmpty()) {
                            var alias = new Alias(logicalFragment.source(), "<all-fields-projected>", Literal.NULL, null, true);
                            List<Alias> fields = singletonList(alias);
                            logicalFragment = new Eval(logicalFragment.source(), logicalFragment, fields);
                            output = Expressions.asAttributes(fields);
                        }
                        // add a logical projection (let the local replanning remove it if needed)
                        FragmentExec newChild = new FragmentExec(
                            Source.EMPTY,
                            new Project(logicalFragment.source(), logicalFragment, output),
                            fragmentExec.esFilter(),
                            fragmentExec.estimatedRowSize(),
                            fragmentExec.reducer()
                        );
                        return new ExchangeExec(exec.source(), output, exec.inBetweenAggs(), newChild);
                    }
                }
            } else {
                // going down: attributes produced by this node are no longer required below it,
                // while its own references must be added to the requirement set
                AttributeSet childOutput = currentPlanNode.inputSet();
                AttributeSet addedAttributes = currentPlanNode.outputSet().subtract(childOutput);
                requiredAttributes.set(requiredAttributes.get().subtract(addedAttributes).combine(currentPlanNode.references()));
            }
            return currentPlanNode;
        });
    }
}
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/EnableSpatialDistancePushdown.java new file mode 100644 index 0000000000000..e27418c2cf6a9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/EnableSpatialDistancePushdown.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; +import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; +import 
org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; + +import java.nio.ByteOrder; + +/** + * When a spatial distance predicate can be pushed down to lucene, this is done by capturing the distance within the same function. + * In principle this is like re-writing the predicate: + *

WHERE ST_DISTANCE(field, TO_GEOPOINT("POINT(0 0)")) <= 10000
+ * as: + *
WHERE ST_INTERSECTS(field, TO_GEOSHAPE("CIRCLE(0,0,10000)"))
+ */ +public class EnableSpatialDistancePushdown extends PhysicalOptimizerRules.ParameterizedOptimizerRule< + FilterExec, + LocalPhysicalOptimizerContext> { + + @Override + protected PhysicalPlan rule(FilterExec filterExec, LocalPhysicalOptimizerContext ctx) { + PhysicalPlan plan = filterExec; + if (filterExec.child() instanceof EsQueryExec) { + // Find and rewrite any binary comparisons that involve a distance function and a literal + var rewritten = filterExec.condition().transformDown(EsqlBinaryComparison.class, comparison -> { + ComparisonType comparisonType = ComparisonType.from(comparison.getFunctionType()); + if (comparison.left() instanceof StDistance dist && comparison.right().foldable()) { + return rewriteComparison(comparison, dist, comparison.right(), comparisonType); + } else if (comparison.right() instanceof StDistance dist && comparison.left().foldable()) { + return rewriteComparison(comparison, dist, comparison.left(), ComparisonType.invert(comparisonType)); + } + return comparison; + }); + if (rewritten.equals(filterExec.condition()) == false) { + plan = new FilterExec(filterExec.source(), filterExec.child(), rewritten); + } + } + + return plan; + } + + private Expression rewriteComparison( + EsqlBinaryComparison comparison, + StDistance dist, + Expression literal, + ComparisonType comparisonType + ) { + Object value = literal.fold(); + if (value instanceof Number number) { + if (dist.right().foldable()) { + return rewriteDistanceFilter(comparison, dist.left(), dist.right(), number, comparisonType); + } else if (dist.left().foldable()) { + return rewriteDistanceFilter(comparison, dist.right(), dist.left(), number, comparisonType); + } + } + return comparison; + } + + private Expression rewriteDistanceFilter( + EsqlBinaryComparison comparison, + Expression spatialExp, + Expression literalExp, + Number number, + ComparisonType comparisonType + ) { + Geometry geometry = SpatialRelatesUtils.makeGeometryFromLiteral(literalExp); + if (geometry instanceof 
Point point) { + double distance = number.doubleValue(); + Source source = comparison.source(); + if (comparisonType.lt) { + distance = comparisonType.eq ? distance : Math.nextDown(distance); + return new SpatialIntersects(source, spatialExp, makeCircleLiteral(point, distance, literalExp)); + } else if (comparisonType.gt) { + distance = comparisonType.eq ? distance : Math.nextUp(distance); + return new SpatialDisjoint(source, spatialExp, makeCircleLiteral(point, distance, literalExp)); + } else if (comparisonType.eq) { + return new And( + source, + new SpatialIntersects(source, spatialExp, makeCircleLiteral(point, distance, literalExp)), + new SpatialDisjoint(source, spatialExp, makeCircleLiteral(point, Math.nextDown(distance), literalExp)) + ); + } + } + return comparison; + } + + private Literal makeCircleLiteral(Point point, double distance, Expression literalExpression) { + var circle = new Circle(point.getX(), point.getY(), distance); + var wkb = WellKnownBinary.toWKB(circle, ByteOrder.LITTLE_ENDIAN); + return new Literal(literalExpression.source(), new BytesRef(wkb), DataType.GEO_SHAPE); + } + + /** + * This enum captures the key differences between various inequalities as perceived from the spatial distance function. + * In particular, we need to know which direction the inequality points, with lt=true meaning the left is expected to be smaller + * than the right. And eq=true meaning we expect euality as well. We currently don't support Equals and NotEquals, so the third + * field disables those. 
+ */ + enum ComparisonType { + LTE(true, false, true), + LT(true, false, false), + GTE(false, true, true), + GT(false, true, false), + EQ(false, false, true); + + private final boolean lt; + private final boolean gt; + private final boolean eq; + + ComparisonType(boolean lt, boolean gt, boolean eq) { + this.lt = lt; + this.gt = gt; + this.eq = eq; + } + + static ComparisonType from(EsqlBinaryComparison.BinaryComparisonOperation op) { + return switch (op) { + case LT -> LT; + case LTE -> LTE; + case GT -> GT; + case GTE -> GTE; + default -> EQ; + }; + } + + static ComparisonType invert(ComparisonType comparisonType) { + return switch (comparisonType) { + case LT -> GT; + case LTE -> GTE; + case GT -> LT; + case GTE -> LTE; + default -> EQ; + }; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java new file mode 100644 index 0000000000000..7186a5194a262 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; +import org.elasticsearch.xpack.esql.core.expression.TypedAttribute; +import org.elasticsearch.xpack.esql.core.rule.Rule; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; +import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; + +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +/** + * + * Materialize the concrete fields that need to be extracted from the storage until the last possible moment. + * Expects the local plan to already have a projection containing the fields needed upstream. + *

+ * 1. add the materialization right before usage inside the local plan + * 2. materialize any missing fields needed further up the chain + * + * @see ProjectAwayColumns + */ +public class InsertFieldExtraction extends Rule { + + @Override + public PhysicalPlan apply(PhysicalPlan plan) { + // apply the plan locally, adding a field extractor right before data is loaded + // by going bottom-up + plan = plan.transformUp(UnaryExec.class, p -> { + var missing = missingAttributes(p); + + /* + * If there is a single grouping then we'll try to use ords. Either way + * it loads the field lazily. If we have more than one field we need to + * make sure the fields are loaded for the standard hash aggregator. + */ + if (p instanceof AggregateExec agg && agg.groupings().size() == 1) { + var leaves = new LinkedList<>(); + // TODO: this seems out of place + agg.aggregates().stream().filter(a -> agg.groupings().contains(a) == false).forEach(a -> leaves.addAll(a.collectLeaves())); + var remove = agg.groupings().stream().filter(g -> leaves.contains(g) == false).toList(); + missing.removeAll(Expressions.references(remove)); + } + + // add extractor + if (missing.isEmpty() == false) { + // collect source attributes and add the extractor + var extractor = new FieldExtractExec(p.source(), p.child(), List.copyOf(missing)); + p = p.replaceChild(extractor); + } + + return p; + }); + + return plan; + } + + private static Set missingAttributes(PhysicalPlan p) { + var missing = new LinkedHashSet(); + var input = p.inputSet(); + + // collect field attributes used inside expressions + p.forEachExpression(TypedAttribute.class, f -> { + if (f instanceof FieldAttribute || f instanceof MetadataAttribute) { + if (input.contains(f) == false) { + missing.add(f); + } + } + }); + return missing; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/LucenePushDownUtils.java 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.esql.optimizer.rules.physical.local;

import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
import org.elasticsearch.xpack.esql.core.type.DataType;
import org.elasticsearch.xpack.esql.stats.SearchStats;

import java.util.function.Predicate;

/**
 * Shared helpers for deciding whether an attribute can be pushed down to Lucene.
 */
class LucenePushDownUtils {

    private LucenePushDownUtils() {
        // utility class, no instances
    }

    /**
     * this method is supposed to be used to define if a field can be used for exact push down (eg. sort or filter).
     * "aggregatable" is the most accurate information we can have from field_caps as of now.
     * Pushing down operations on fields that are not aggregatable would result in an error.
     */
    public static boolean isAggregatable(FieldAttribute f) {
        return f.exactAttribute().field().isAggregatable();
    }

    public static boolean hasIdenticalDelegate(FieldAttribute attr, SearchStats stats) {
        return stats.hasIdenticalDelegate(attr.name());
    }

    /**
     * True when {@code exp} is a field attribute that can be pushed down exactly; TEXT fields
     * additionally require an identical delegate (e.g. a keyword sub-field) per the predicate.
     */
    public static boolean isPushableFieldAttribute(Expression exp, Predicate<FieldAttribute> hasIdenticalDelegate) {
        if (exp instanceof FieldAttribute fa && fa.getExactInfo().hasExact() && isAggregatable(fa)) {
            return fa.dataType() != DataType.TEXT || hasIdenticalDelegate.test(fa);
        }
        return false;
    }
}
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; +import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.expression.predicate.Range; +import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RegexMatch; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardLike; +import org.elasticsearch.xpack.esql.core.querydsl.query.Query; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.CollectionUtils; +import org.elasticsearch.xpack.esql.core.util.Queries; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveBinaryComparison; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; + +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.core.expression.predicate.Predicates.splitAnd; + +public class PushFiltersToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule { + + @Override + protected PhysicalPlan rule(FilterExec filterExec, LocalPhysicalOptimizerContext ctx) { + PhysicalPlan plan = filterExec; + if (filterExec.child() instanceof EsQueryExec queryExec) { + List pushable = new ArrayList<>(); + List nonPushable = new ArrayList<>(); + for (Expression exp : splitAnd(filterExec.condition())) { + (canPushToSource(exp, x -> LucenePushDownUtils.hasIdenticalDelegate(x, ctx.searchStats())) ? 
pushable : nonPushable).add( + exp + ); + } + // Combine GT, GTE, LT and LTE in pushable to Range if possible + List newPushable = combineEligiblePushableToRange(pushable); + if (newPushable.size() > 0) { // update the executable with pushable conditions + Query queryDSL = PlannerUtils.TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(newPushable)); + QueryBuilder planQuery = queryDSL.asBuilder(); + var query = Queries.combine(Queries.Clause.FILTER, asList(queryExec.query(), planQuery)); + queryExec = new EsQueryExec( + queryExec.source(), + queryExec.index(), + queryExec.indexMode(), + queryExec.output(), + query, + queryExec.limit(), + queryExec.sorts(), + queryExec.estimatedRowSize() + ); + if (nonPushable.size() > 0) { // update filter with remaining non-pushable conditions + plan = new FilterExec(filterExec.source(), queryExec, Predicates.combineAnd(nonPushable)); + } else { // prune Filter entirely + plan = queryExec; + } + } // else: nothing changes + } + + return plan; + } + + private static List combineEligiblePushableToRange(List pushable) { + List bcs = new ArrayList<>(); + List ranges = new ArrayList<>(); + List others = new ArrayList<>(); + boolean changed = false; + + pushable.forEach(e -> { + if (e instanceof GreaterThan || e instanceof GreaterThanOrEqual || e instanceof LessThan || e instanceof LessThanOrEqual) { + if (((EsqlBinaryComparison) e).right().foldable()) { + bcs.add((EsqlBinaryComparison) e); + } else { + others.add(e); + } + } else { + others.add(e); + } + }); + + for (int i = 0, step = 1; i < bcs.size() - 1; i += step, step = 1) { + BinaryComparison main = bcs.get(i); + for (int j = i + 1; j < bcs.size(); j++) { + BinaryComparison other = bcs.get(j); + if (main.left().semanticEquals(other.left())) { + // >/>= AND />= + else if ((other instanceof GreaterThan || other instanceof GreaterThanOrEqual) + && (main instanceof LessThan || main instanceof LessThanOrEqual)) { + bcs.remove(j); + bcs.remove(i); + + ranges.add( + new Range( + 
main.source(), + main.left(), + other.right(), + other instanceof GreaterThanOrEqual, + main.right(), + main instanceof LessThanOrEqual, + main.zoneId() + ) + ); + + changed = true; + step = 0; + break; + } + } + } + } + return changed ? CollectionUtils.combine(others, bcs, ranges) : pushable; + } + + public static boolean canPushToSource(Expression exp, Predicate hasIdenticalDelegate) { + if (exp instanceof BinaryComparison bc) { + return isAttributePushable(bc.left(), bc, hasIdenticalDelegate) && bc.right().foldable(); + } else if (exp instanceof InsensitiveBinaryComparison bc) { + return isAttributePushable(bc.left(), bc, hasIdenticalDelegate) && bc.right().foldable(); + } else if (exp instanceof BinaryLogic bl) { + return canPushToSource(bl.left(), hasIdenticalDelegate) && canPushToSource(bl.right(), hasIdenticalDelegate); + } else if (exp instanceof In in) { + return isAttributePushable(in.value(), null, hasIdenticalDelegate) && Expressions.foldable(in.list()); + } else if (exp instanceof Not not) { + return canPushToSource(not.field(), hasIdenticalDelegate); + } else if (exp instanceof UnaryScalarFunction usf) { + if (usf instanceof RegexMatch || usf instanceof IsNull || usf instanceof IsNotNull) { + if (usf instanceof IsNull || usf instanceof IsNotNull) { + if (usf.field() instanceof FieldAttribute fa && fa.dataType().equals(DataType.TEXT)) { + return true; + } + } + return isAttributePushable(usf.field(), usf, hasIdenticalDelegate); + } + } else if (exp instanceof CIDRMatch cidrMatch) { + return isAttributePushable(cidrMatch.ipField(), cidrMatch, hasIdenticalDelegate) && Expressions.foldable(cidrMatch.matches()); + } else if (exp instanceof SpatialRelatesFunction bc) { + return bc.canPushToSource(LucenePushDownUtils::isAggregatable); + } else if (exp instanceof MatchQueryPredicate mqp) { + return mqp.field() instanceof FieldAttribute && DataType.isString(mqp.field().dataType()); + } else if (exp instanceof StringQueryPredicate) { + return true; + } + return 
false; + } + + private static boolean isAttributePushable( + Expression expression, + Expression operation, + Predicate hasIdenticalDelegate + ) { + if (LucenePushDownUtils.isPushableFieldAttribute(expression, hasIdenticalDelegate)) { + return true; + } + if (expression instanceof MetadataAttribute ma && ma.searchable()) { + return operation == null + // no range or regex queries supported with metadata fields + || operation instanceof Equals + || operation instanceof NotEquals + || operation instanceof WildcardLike; + } + return false; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushLimitToSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushLimitToSource.java new file mode 100644 index 0000000000000..a6d3d5c1d537f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushLimitToSource.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; +import org.elasticsearch.xpack.esql.plan.physical.LimitExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; + +public class PushLimitToSource extends PhysicalOptimizerRules.OptimizerRule { + @Override + protected PhysicalPlan rule(LimitExec limitExec) { + PhysicalPlan plan = limitExec; + PhysicalPlan child = limitExec.child(); + if (child instanceof EsQueryExec queryExec) { // add_task_parallelism_above_query: false + plan = queryExec.withLimit(limitExec.limit()); + } else if (child instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec queryExec) { + plan = exchangeExec.replaceChild(queryExec.withLimit(limitExec.limit())); + } + return plan; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java new file mode 100644 index 0000000000000..b0b86b43cd162 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeMap; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; + +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType.COUNT; + +/** + * Looks for the case where certain stats exist right before the query and thus can be pushed down. 
+ */ +public class PushStatsToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule { + + @Override + protected PhysicalPlan rule(AggregateExec aggregateExec, LocalPhysicalOptimizerContext context) { + PhysicalPlan plan = aggregateExec; + if (aggregateExec.child() instanceof EsQueryExec queryExec) { + var tuple = pushableStats(aggregateExec, context); + + // for the moment support pushing count just for one field + List stats = tuple.v2(); + if (stats.size() > 1) { + return aggregateExec; + } + + // TODO: handle case where some aggs cannot be pushed down by breaking the aggs into two sources (regular + stats) + union + // use the stats since the attributes are larger in size (due to seen) + if (tuple.v2().size() == aggregateExec.aggregates().size()) { + plan = new EsStatsQueryExec( + aggregateExec.source(), + queryExec.index(), + queryExec.query(), + queryExec.limit(), + tuple.v1(), + tuple.v2() + ); + } + } + return plan; + } + + private Tuple, List> pushableStats( + AggregateExec aggregate, + LocalPhysicalOptimizerContext context + ) { + AttributeMap stats = new AttributeMap<>(); + Tuple, List> tuple = new Tuple<>(new ArrayList<>(), new ArrayList<>()); + + if (aggregate.groupings().isEmpty()) { + for (NamedExpression agg : aggregate.aggregates()) { + var attribute = agg.toAttribute(); + EsStatsQueryExec.Stat stat = stats.computeIfAbsent(attribute, a -> { + if (agg instanceof Alias as) { + Expression child = as.child(); + if (child instanceof Count count) { + var target = count.field(); + String fieldName = null; + QueryBuilder query = null; + // TODO: add count over field (has to be field attribute) + if (target.foldable()) { + fieldName = StringUtils.WILDCARD; + } + // check if regular field + else { + if (target instanceof FieldAttribute fa) { + var fName = fa.name(); + if (context.searchStats().isSingleValue(fName)) { + fieldName = fa.name(); + query = QueryBuilders.existsQuery(fieldName); + } + } + } + if (fieldName != null) { + return new 
EsStatsQueryExec.Stat(fieldName, COUNT, query); + } + } + } + return null; + }); + if (stat != null) { + List intermediateAttributes = AbstractPhysicalOperationProviders.intermediateAttributes( + singletonList(agg), + emptyList() + ); + // TODO: the attributes have been recreated here; they will have wrong name ids, and the dependency check will + // probably fail when we fix https://github.com/elastic/elasticsearch/issues/105436. + // We may need to refactor AbstractPhysicalOperationProviders.intermediateAttributes so it doesn't return just + // a list of attributes, but a mapping from the logical to the physical attributes. + tuple.v1().addAll(intermediateAttributes); + tuple.v2().add(stat); + } + } + } + + return tuple; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java new file mode 100644 index 0000000000000..87bc344c397c1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.expression.Order; +import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.TopNExec; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; + +public class PushTopNToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule { + @Override + protected PhysicalPlan rule(TopNExec topNExec, LocalPhysicalOptimizerContext ctx) { + PhysicalPlan plan = topNExec; + PhysicalPlan child = topNExec.child(); + if (canPushSorts(child) + && canPushDownOrders(topNExec.order(), x -> LucenePushDownUtils.hasIdenticalDelegate(x, ctx.searchStats()))) { + var sorts = buildFieldSorts(topNExec.order()); + var limit = topNExec.limit(); + + if (child instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec queryExec) { + plan = exchangeExec.replaceChild(queryExec.withSorts(sorts).withLimit(limit)); + } else { + plan = ((EsQueryExec) child).withSorts(sorts).withLimit(limit); + } + } + return plan; + } + + private static boolean canPushSorts(PhysicalPlan plan) { + if (plan instanceof EsQueryExec queryExec) { + return queryExec.canPushSorts(); + } + if (plan instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec queryExec) { + return queryExec.canPushSorts(); + } + return false; + } + + private boolean canPushDownOrders(List orders, Predicate hasIdenticalDelegate) { + // allow only exact FieldAttributes (no expressions) for sorting + return orders.stream().allMatch(o -> 
LucenePushDownUtils.isPushableFieldAttribute(o.child(), hasIdenticalDelegate)); + } + + private List buildFieldSorts(List orders) { + List sorts = new ArrayList<>(orders.size()); + for (Order o : orders) { + sorts.add(new EsQueryExec.FieldSort(((FieldAttribute) o.child()).exactAttribute(), o.direction(), o.nullsPosition())); + } + return sorts; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java new file mode 100644 index 0000000000000..74ea6f99e5e59 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules.TransformDirection.UP; + +public class ReplaceSourceAttributes extends PhysicalOptimizerRules.OptimizerRule { + + public ReplaceSourceAttributes() { + super(UP); + } + + @Override + protected PhysicalPlan rule(EsSourceExec plan) { + var docId = new FieldAttribute(plan.source(), EsQueryExec.DOC_ID_FIELD.getName(), EsQueryExec.DOC_ID_FIELD); + if (plan.indexMode() == IndexMode.TIME_SERIES) { + Attribute tsid = null, timestamp = null; + for (Attribute attr : plan.output()) { + String name = attr.name(); + if (name.equals(MetadataAttribute.TSID_FIELD)) { + tsid = attr; + } else if (name.equals(MetadataAttribute.TIMESTAMP_FIELD)) { + timestamp = attr; + } + } + if (tsid == null || timestamp == null) { + throw new IllegalStateException("_tsid or @timestamp are missing from the time-series source"); + } + return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId, tsid, timestamp), plan.query()); + } else { + return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId), plan.query()); + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java new file mode 100644 index 0000000000000..0bad99375d315 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; + +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; +import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.esql.plan.physical.EvalExec; +import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class SpatialDocValuesExtraction extends PhysicalOptimizerRules.OptimizerRule { + @Override + protected PhysicalPlan rule(AggregateExec aggregate) { + var foundAttributes = new HashSet(); + + PhysicalPlan plan = aggregate.transformDown(UnaryExec.class, exec -> { + if (exec instanceof 
AggregateExec agg) { + var orderedAggregates = new ArrayList(); + var changedAggregates = false; + for (NamedExpression aggExpr : agg.aggregates()) { + if (aggExpr instanceof Alias as && as.child() instanceof SpatialAggregateFunction af) { + if (af.field() instanceof FieldAttribute fieldAttribute + && allowedForDocValues(fieldAttribute, agg, foundAttributes)) { + // We need to both mark the field to load differently, and change the spatial function to know to use it + foundAttributes.add(fieldAttribute); + changedAggregates = true; + orderedAggregates.add(as.replaceChild(af.withDocValues())); + } else { + orderedAggregates.add(aggExpr); + } + } else { + orderedAggregates.add(aggExpr); + } + } + if (changedAggregates) { + exec = new AggregateExec( + agg.source(), + agg.child(), + agg.groupings(), + orderedAggregates, + agg.getMode(), + agg.intermediateAttributes(), + agg.estimatedRowSize() + ); + } + } + if (exec instanceof EvalExec evalExec) { + List fields = evalExec.fields(); + List changed = fields.stream() + .map( + f -> (Alias) f.transformDown( + SpatialRelatesFunction.class, + spatialRelatesFunction -> (spatialRelatesFunction.hasFieldAttribute(foundAttributes)) + ? spatialRelatesFunction.withDocValues(foundAttributes) + : spatialRelatesFunction + ) + ) + .toList(); + if (changed.equals(fields) == false) { + exec = new EvalExec(exec.source(), exec.child(), changed); + } + } + if (exec instanceof FilterExec filterExec) { + // Note that ST_CENTROID does not support shapes, but SpatialRelatesFunction does, so when we extend the centroid + // to support shapes, we need to consider loading shape doc-values for both centroid and relates (ST_INTERSECTS) + var condition = filterExec.condition() + .transformDown( + SpatialRelatesFunction.class, + spatialRelatesFunction -> (spatialRelatesFunction.hasFieldAttribute(foundAttributes)) + ? 
spatialRelatesFunction.withDocValues(foundAttributes) + : spatialRelatesFunction + ); + if (filterExec.condition().equals(condition) == false) { + exec = new FilterExec(filterExec.source(), filterExec.child(), condition); + } + } + if (exec instanceof FieldExtractExec fieldExtractExec) { + // Tell the field extractor that it should extract the field from doc-values instead of source values + var attributesToExtract = fieldExtractExec.attributesToExtract(); + Set docValuesAttributes = new HashSet<>(); + for (Attribute found : foundAttributes) { + if (attributesToExtract.contains(found)) { + docValuesAttributes.add(found); + } + } + if (docValuesAttributes.size() > 0) { + exec = new FieldExtractExec(exec.source(), exec.child(), attributesToExtract, docValuesAttributes); + } + } + return exec; + }); + return plan; + } + + /** + * This function disallows the use of more than one field for doc-values extraction in the same spatial relation function. + * This is because comparing two doc-values fields is not supported in the current implementation. 
+ */ + private boolean allowedForDocValues(FieldAttribute fieldAttribute, AggregateExec agg, Set foundAttributes) { + var candidateDocValuesAttributes = new HashSet<>(foundAttributes); + candidateDocValuesAttributes.add(fieldAttribute); + var spatialRelatesAttributes = new HashSet(); + agg.forEachExpressionDown(SpatialRelatesFunction.class, relatesFunction -> { + candidateDocValuesAttributes.forEach(candidate -> { + if (relatesFunction.hasFieldAttribute(Set.of(candidate))) { + spatialRelatesAttributes.add(candidate); + } + }); + }); + // Disallow more than one spatial field to be extracted using doc-values (for now) + return spatialRelatesAttributes.size() < 2; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index d2568e5f5031c..7beed64dda8cb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -62,11 +62,12 @@ import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.NONE; import static org.elasticsearch.xpack.esql.core.util.Queries.Clause.FILTER; -import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.PushFiltersToSource.canPushToSource; -import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.TRANSLATOR_HANDLER; +import static org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushFiltersToSource.canPushToSource; public class PlannerUtils { + public static final EsqlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); + public static Tuple breakPlanBetweenCoordinatorAndDataNode(PhysicalPlan plan, Configuration config) { var dataNodePlan = new Holder(); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index f3c87e0e9d1d7..4e26baddd013b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.planner.ToAggregator; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index c79f5ab8d086b..7bdeb2dad9e48 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -61,7 +61,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; import org.elasticsearch.xpack.esql.parser.ExpressionBuilder; import 
org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.planner.PlannerUtils; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index 3ef2a7f821457..fed81d4260bcd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.hamcrest.Matcher; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java deleted file mode 100644 index dc12f0231b79c..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.optimizer; - -import org.elasticsearch.xpack.esql.core.expression.Expression; - -public class FoldNull extends org.elasticsearch.xpack.esql.optimizer.rules.FoldNull { - @Override - public Expression rule(Expression e) { - return super.rule(e); - } -} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index 8ee84daf06802..3eef969f8f93b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferIsNotNull; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; @@ -467,7 +468,7 @@ public void testIsNotNullOnIsNullField() { Expression inn = isNotNull(fieldA); Filter f = new Filter(EMPTY, relation, inn); - assertEquals(f, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + assertEquals(f, new InferIsNotNull().apply(f)); } public void testIsNotNullOnOperatorWithOneField() { @@ -477,7 +478,7 @@ public void testIsNotNullOnOperatorWithOneField() { Filter f = new Filter(EMPTY, relation, inn); Filter expected = new Filter(EMPTY, relation, new And(EMPTY, isNotNull(fieldA), inn)); - assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + assertEquals(expected, new InferIsNotNull().apply(f)); } public void testIsNotNullOnOperatorWithTwoFields() { @@ -488,7 
+489,7 @@ public void testIsNotNullOnOperatorWithTwoFields() { Filter f = new Filter(EMPTY, relation, inn); Filter expected = new Filter(EMPTY, relation, new And(EMPTY, new And(EMPTY, isNotNull(fieldA), isNotNull(fieldB)), inn)); - assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + assertEquals(expected, new InferIsNotNull().apply(f)); } public void testIsNotNullOnFunctionWithOneField() { @@ -500,7 +501,7 @@ public void testIsNotNullOnFunctionWithOneField() { Filter f = new Filter(EMPTY, relation, inn); Filter expected = new Filter(EMPTY, relation, new And(EMPTY, isNotNull(fieldA), inn)); - assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + assertEquals(expected, new InferIsNotNull().apply(f)); } public void testIsNotNullOnFunctionWithTwoFields() { @@ -512,7 +513,7 @@ public void testIsNotNullOnFunctionWithTwoFields() { Filter f = new Filter(EMPTY, relation, inn); Filter expected = new Filter(EMPTY, relation, new And(EMPTY, new And(EMPTY, isNotNull(fieldA), isNotNull(fieldB)), inn)); - assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + assertEquals(expected, new InferIsNotNull().apply(f)); } private IsNotNull isNotNull(Expression field) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 5268eb4349af8..9bfbb04aa6606 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -109,14 +109,16 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; -import 
org.elasticsearch.xpack.esql.optimizer.rules.LiteralsOnTheRight; -import org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineFilters; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineLimits; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownEnrich; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownEval; -import org.elasticsearch.xpack.esql.optimizer.rules.PushDownRegexExtract; -import org.elasticsearch.xpack.esql.optimizer.rules.SplitInWithFoldableValue; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.LiteralsOnTheRight; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateNullable; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownAndCombineFilters; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownAndCombineLimits; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownEnrich; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownEval; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.PushDownRegexExtract; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.SplitInWithFoldableValue; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.GeneratingPlan; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 32993ca90cd83..96e8cd68ae9d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -73,6 +73,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; @@ -2099,7 +2100,7 @@ public void testFieldExtractWithoutSourceAttributes() { } public void testProjectAwayColumns() { - var rule = new PhysicalPlanOptimizer.ProjectAwayColumns(); + var rule = new ProjectAwayColumns(); // FROM test | limit 10000 // diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java deleted file mode 100644 index a7a996230facd..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.optimizer; - -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; - -public class PropagateNullable extends org.elasticsearch.xpack.esql.optimizer.rules.PropagateNullable { - @Override - public Expression rule(And and) { - return super.rule(and); - } -} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsEliminationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanFunctionEqualsEliminationTests.java similarity index 97% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsEliminationTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanFunctionEqualsEliminationTests.java index d5d274d0fc62f..08c8612d8097c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsEliminationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanFunctionEqualsEliminationTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplificationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanSimplificationTests.java similarity index 98% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplificationTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanSimplificationTests.java index 6864bf4b9ebef..3b1f8cfc83af3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplificationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/BooleanSimplificationTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineBinaryComparisonsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineBinaryComparisonsTests.java similarity index 99% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineBinaryComparisonsTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineBinaryComparisonsTests.java index 3c98b7fa23e8b..d388369e0b167 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineBinaryComparisonsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineBinaryComparisonsTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.EsqlTestUtils; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineDisjunctionsTests.java similarity index 99% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineDisjunctionsTests.java index 2060327f1e18d..043d18dac9fd4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineDisjunctionsTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFoldingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConstantFoldingTests.java similarity index 99% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFoldingTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConstantFoldingTests.java index 366116d33901f..a74ceb4e1426c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFoldingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ConstantFoldingTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Alias; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java similarity index 97% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNullTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java index c9302937b1391..85b5cfa9ed521 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNullTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRightTests.java similarity index 95% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRightTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRightTests.java index a884080504db8..17e69e81444c5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRightTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRightTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Alias; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEqualsTests.java similarity index 99% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEqualsTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEqualsTests.java index 99632fa127a3b..55091653e75d4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateEqualsTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullableTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateNullableTests.java similarity index 99% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullableTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateNullableTests.java index 29d5bb4cab907..0838777a84e61 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullableTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropagateNullableTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatchTests.java similarity index 98% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatchTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatchTests.java index 62b13e6c9cc03..c5e64d41be4dc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatchTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatchTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.optimizer.rules; +package org.elasticsearch.xpack.esql.optimizer.rules.logical; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; From 04678e9a15a5f33241ac007173396593736f96e6 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 5 Sep 2024 10:43:20 +0100 Subject: [PATCH 063/115] [DOCS][ESQL] Include bucket in agg functions list (#112513) --- docs/reference/esql/processing-commands/stats.asciidoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc index 7377522a93201..0c479c1f62b76 100644 --- a/docs/reference/esql/processing-commands/stats.asciidoc +++ b/docs/reference/esql/processing-commands/stats.asciidoc @@ -3,7 +3,7 @@ === `STATS ... BY` The `STATS ... 
BY` processing command groups rows according to a common value -and calculate one or more aggregated values over the grouped rows. +and calculates one or more aggregated values over the grouped rows. **Syntax** @@ -41,6 +41,10 @@ The following <> are supported: include::../functions/aggregation-functions.asciidoc[tag=agg_list] +The following <> are supported: + +include::../functions/grouping-functions.asciidoc[tag=group_list] + NOTE: `STATS` without any groups is much much faster than adding a group. NOTE: Grouping on a single expression is currently much more optimized than grouping From 02dfcbc9cf911d834bd42e06d462fe48fa28ad58 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 5 Sep 2024 12:21:49 +0200 Subject: [PATCH 064/115] ES|QL: better validation for LIKE and RLIKE patterns (#112489) --- docs/changelog/112489.yaml | 6 ++++++ .../expression/predicate/regex/RLikePattern.java | 4 ++++ .../predicate/regex/StringPattern.java | 5 +++++ .../rules/logical/ReplaceRegexMatch.java | 14 +++++++++++++- .../xpack/esql/parser/ExpressionBuilder.java | 8 +++++++- .../optimizer/LogicalPlanOptimizerTests.java | 16 ++++++++++++++++ .../xpack/esql/parser/StatementParserTests.java | 6 ++++++ 7 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/112489.yaml diff --git a/docs/changelog/112489.yaml b/docs/changelog/112489.yaml new file mode 100644 index 0000000000000..ebc84927b0e76 --- /dev/null +++ b/docs/changelog/112489.yaml @@ -0,0 +1,6 @@ +pr: 112489 +summary: "ES|QL: better validation for RLIKE patterns" +area: ES|QL +type: bug +issues: + - 112485 diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLikePattern.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLikePattern.java index 4257285ba8bd7..f437dc5819dcb 100644 --- 
a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLikePattern.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLikePattern.java @@ -41,4 +41,8 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(regexpPattern); } + + public String pattern() { + return regexpPattern; + } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/StringPattern.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/StringPattern.java index cb2bdd55937b6..c513f91fb6be0 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/StringPattern.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/StringPattern.java @@ -25,4 +25,9 @@ default boolean matchesAll() { * If the pattern is not exact, null is returned. */ String exactMatch(); + + /** + * Returns the pattern as it was defined by the user. 
+ */ + String pattern(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java index b462a99bc5e14..1a8f8a164cc1b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRegexMatch.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.regex.StringPattern; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.parser.ParsingException; public final class ReplaceRegexMatch extends OptimizerRules.OptimizerExpressionRule> { @@ -25,7 +26,18 @@ public ReplaceRegexMatch() { public Expression rule(RegexMatch regexMatch) { Expression e = regexMatch; StringPattern pattern = regexMatch.pattern(); - if (pattern.matchesAll()) { + boolean matchesAll; + try { + matchesAll = pattern.matchesAll(); + } catch (IllegalArgumentException ex) { + throw new ParsingException( + regexMatch.source(), + "Invalid regex pattern for RLIKE [{}]: [{}]", + regexMatch.pattern().pattern(), + ex.getMessage() + ); + } + if (matchesAll) { e = new IsNotNull(e.source(), regexMatch.field()); } else { String match = pattern.exactMatch(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 0352afdee4622..2621c76805591 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -604,7 +604,13 @@ public Expression 
visitRegexBooleanExpression(EsqlBaseParser.RegexBooleanExpress Expression left = expression(ctx.valueExpression()); Literal pattern = visitString(ctx.pattern); RegexMatch result = switch (type) { - case EsqlBaseParser.LIKE -> new WildcardLike(source, left, new WildcardPattern(pattern.fold().toString())); + case EsqlBaseParser.LIKE -> { + try { + yield new WildcardLike(source, left, new WildcardPattern(pattern.fold().toString())); + } catch (InvalidArgumentException e) { + throw new ParsingException(source, "Invalid pattern for LIKE [{}]: [{}]", pattern, e.getMessage()); + } + } case EsqlBaseParser.RLIKE -> new RLike(source, left, new RLikePattern(pattern.fold().toString())); default -> throw new ParsingException("Invalid predicate type for [{}]", source.text()); }; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 9bfbb04aa6606..c769d01e7381b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -1940,6 +1940,22 @@ public void testSimplifyRLikeMatchAll() { assertTrue(filter.child() instanceof EsRelation); } + public void testRLikeWrongPattern() { + String query = "from test | where first_name rlike \"(?i)(^|[^a-zA-Z0-9_-])nmap($|\\\\.)\""; + String error = "line 1:20: Invalid regex pattern for RLIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + + "[invalid range: from (95) cannot be > to (93)]"; + ParsingException e = expectThrows(ParsingException.class, () -> plan(query)); + assertThat(e.getMessage(), is(error)); + } + + public void testLikeWrongPattern() { + String query = "from test | where first_name like \"(?i)(^|[^a-zA-Z0-9_-])nmap($|\\\\.)\""; + String error = "line 1:20: Invalid pattern for LIKE 
[(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + + "[Invalid sequence - escape character is not followed by special wildcard char]"; + ParsingException e = expectThrows(ParsingException.class, () -> plan(query)); + assertThat(e.getMessage(), is(error)); + } + public void testFoldNullInToLocalRelation() { LogicalPlan plan = optimizedPlan(""" from test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 0d4615b44aa35..35a22fd542a1e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -815,6 +815,12 @@ public void testLikeRLike() { expectError("from a | where foo like 12", "mismatched input '12'"); expectError("from a | where foo rlike 12", "mismatched input '12'"); + + expectError( + "from a | where foo like \"(?i)(^|[^a-zA-Z0-9_-])nmap($|\\\\.)\"", + "line 1:17: Invalid pattern for LIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + + "[Invalid sequence - escape character is not followed by special wildcard char]" + ); } public void testEnrich() { From a2df7e7229e772eddc9a8aba2ecfdbd162810c78 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Thu, 5 Sep 2024 15:44:58 +0200 Subject: [PATCH 065/115] Yet more unsupported locales for Kerb tests (#112555) Adds more locales, in addition to https://github.com/elastic/elasticsearch/pull/109670 (see the original PR for context). 
Closes: https://github.com/elastic/elasticsearch/issues/112533 Closes: https://github.com/elastic/elasticsearch/issues/112534 --- .../xpack/security/authc/kerberos/KerberosTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index c28e5f1e0fce8..cd17723c2635c 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -86,7 +86,8 @@ public abstract class KerberosTestCase extends ESTestCase { "sd", "mni", "sat", - "sa" + "sa", + "bgc" ); @BeforeClass From 4aa3c3d7ee826b5ba3293a618a554bd0b2faceaa Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 5 Sep 2024 09:25:53 -0500 Subject: [PATCH 066/115] Add support for templates when validating mappings in the simulate ingest API (#111161) --- docs/changelog/111161.yaml | 6 + .../ingest/apis/simulate-ingest.asciidoc | 6 +- .../test/ingest/80_ingest_simulate.yml | 85 ++++++ .../test/simulate.ingest/10_basic.yml | 268 +++++++++++++++++ .../bulk/TransportSimulateBulkActionIT.java | 278 ++++++++++++++++++ .../TransportSimulateIndexTemplateAction.java | 18 +- .../action/bulk/BulkFeatures.java | 3 +- .../bulk/TransportSimulateBulkAction.java | 132 ++++++++- .../action/ingest/SimulateIndexResponse.java | 4 + .../metadata/MetadataCreateIndexService.java | 2 +- .../TransportSimulateBulkActionTests.java | 63 +++- 11 files changed, 841 insertions(+), 24 deletions(-) create mode 100644 docs/changelog/111161.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java diff --git a/docs/changelog/111161.yaml b/docs/changelog/111161.yaml new file mode 
100644 index 0000000000000..c081d555ff1ee --- /dev/null +++ b/docs/changelog/111161.yaml @@ -0,0 +1,6 @@ +pr: 111161 +summary: Add support for templates when validating mappings in the simulate ingest + API +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc index 36f1f089ce90e..ee84a39ee6f65 100644 --- a/docs/reference/ingest/apis/simulate-ingest.asciidoc +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -119,7 +119,11 @@ as well the same way that a non-simulated ingest would. No data is indexed into {es}. Instead, the transformed document is returned, along with the list of pipelines that have been executed and the name of the index where the document would have been indexed if this were -not a simulation. This differs from the +not a simulation. The transformed document is validated against the +mappings that would apply to this index, and any validation error is +reported in the result. + +This API differs from the <> in that you specify a single pipeline for that API, and it only runs that one pipeline. 
The simulate pipeline API is more useful for developing a single pipeline, diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index a42b987a9bddd..1a77019914283 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -212,3 +212,88 @@ setup: - match: { docs.1.doc._index: "index" } - match: { docs.1.doc._source.field1: "BAR" } - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] } + +--- +"Test ingest simulate with reroute and mapping validation from templates": + + - skip: + features: headers + + - requires: + cluster_features: ["simulate.mapping.validation.templates"] + reason: "ingest simulate index mapping validation added in 8.16" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "reroute-pipeline" + body: > + { + "processors": [ + { + "reroute": { + "destination": "second-index" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: first-index-template + body: + index_patterns: first-index* + template: + settings: + default_pipeline: "reroute-pipeline" + mappings: + dynamic: strict + properties: + foo: + type: text + + - do: + indices.put_index_template: + name: second-index-template + body: + index_patterns: second-index* + template: + mappings: + dynamic: strict + properties: + bar: + type: text + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "first-index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "first-index", + "_id": "id", + "_source": { + "bar": "foo" + } + } + ] + } + - length: { 
docs: 2 } + - match: { docs.0.doc._index: "second-index" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.0.doc.error.reason: "[1:8] mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed" } + - match: { docs.1.doc._index: "second-index" } + - match: { docs.1.doc._source.bar: "foo" } + - not_exists: docs.1.doc.error diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml index 5928dce2c104e..a32969b0b69b2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -258,6 +258,274 @@ setup: - not_exists: docs.1.doc.error --- +"Test mapping validation from templates": + + - skip: + features: headers + + - requires: + cluster_features: ["simulate.mapping.validation.templates"] + reason: "ingest simulate index mapping validation added in 8.16" + + - do: + indices.put_template: + name: v1_template + body: + index_patterns: v1_strict_nonexistent* + mappings: + dynamic: strict + properties: + foo: + type: text + + - do: + indices.put_index_template: + name: v2_template + body: + index_patterns: v2_strict_nonexistent* + template: + mappings: + dynamic: strict + properties: + foo: + type: text + + - do: + indices.put_index_template: + name: v2_hidden_template + body: + index_patterns: v2_strict_hidden_nonexistent* + template: + settings: + index: + hidden: true + mappings: + dynamic: strict + properties: + foo: + type: text + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "v1_strict_nonexistent_index", + "_id": "id", + "_source": { + "foob": "bar" + } + }, + { + "_index": "v1_strict_nonexistent_index", + "_id": "id", + 
"_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._source.foob: "bar" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.0.doc.error.reason: "[1:9] mapping set to strict, dynamic introduction of [foob] within [_doc] is not allowed" } + - match: { docs.1.doc._source.foo: "rab" } + - not_exists: docs.1.doc.error + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "v2_strict_nonexistent_index", + "_id": "id", + "_source": { + "foob": "bar" + } + }, + { + "_index": "v2_strict_nonexistent_index", + "_id": "id", + "_source": { + "foo": "rab" + } + }, + { + "_index": "v2_strict_hidden_nonexistent_index", + "_id": "id", + "_source": { + "foob": "bar" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 3 } + - match: { docs.0.doc._source.foob: "bar" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.0.doc.error.reason: "[1:9] mapping set to strict, dynamic introduction of [foob] within [_doc] is not allowed" } + - match: { docs.1.doc._source.foo: "rab" } + - not_exists: docs.1.doc.error + - match: { docs.2.doc._source.foob: "bar" } + - match: { docs.2.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.2.doc.error.reason: "[1:9] mapping set to strict, dynamic introduction of [foob] within [_doc] is not allowed" } + +--- +"Test mapping validation for data streams from templates": + + - skip: + features: + - headers + - allowed_warnings + + - requires: + cluster_features: ["simulate.mapping.validation.templates"] + reason: "ingest simulate index mapping validation added in 8.16" + + - do: + allowed_warnings: + - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates 
[global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation" + indices.put_index_template: + name: my-template1 + body: + index_patterns: [simple-data-stream1] + template: + settings: + index.number_of_replicas: 1 + mappings: + dynamic: strict + properties: + foo: + type: text + data_stream: {} + + - do: + allowed_warnings: + - "index template [my-hidden-template1] has index patterns [simple-hidden-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-hidden-template1] will take precedence during new index creation" + indices.put_index_template: + name: my-hidden-template1 + body: + index_patterns: [simple-hidden-data-stream1] + template: + settings: + index.number_of_replicas: 1 + mappings: + dynamic: strict + properties: + foo: + type: text + data_stream: + hidden: true + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "simple-data-stream1", + "_id": "id", + "_source": { + "@timestamp": "2020-12-12", + "foob": "bar" + } + }, + { + "_index": "simple-data-stream1", + "_id": "id", + "_source": { + "@timestamp": "2020-12-12", + "foo": "rab" + } + }, + { + "_index": "simple-hidden-data-stream1", + "_id": "id", + "_source": { + "@timestamp": "2020-12-12", + "foob": "bar" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 3 } + - match: { docs.0.doc._source.foob: "bar" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.0.doc.error.reason: "[1:35] mapping set to strict, dynamic introduction of [foob] within [_doc] is not allowed" } + - match: { docs.1.doc._source.foo: "rab" } + - not_exists: docs.1.doc.error + - match: { docs.2.doc._source.foob: "bar" } + - match: { docs.2.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.2.doc.error.reason: "[1:35] mapping set 
to strict, dynamic introduction of [foob] within [_doc] is not allowed" } + + - do: + indices.create_data_stream: + name: simple-data-stream1 + - is_true: acknowledged + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "simple-data-stream1", + "_id": "id", + "_source": { + "@timestamp": "2020-12-12", + "foob": "bar" + } + }, + { + "_index": "simple-data-stream1", + "_id": "id", + "_source": { + "@timestamp": "2020-12-12", + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._source.foob: "bar" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + - match: { docs.0.doc.error.reason: "[1:35] mapping set to strict, dynamic introduction of [foob] within [_doc] is not allowed" } + - match: { docs.1.doc._source.foo: "rab" } + - not_exists: docs.1.doc.error +--- "Test index templates with pipelines": - skip: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java new file mode 100644 index 0000000000000..4a56a6ce8ddb6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java @@ -0,0 +1,278 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class TransportSimulateBulkActionIT extends ESIntegTestCase { + @SuppressWarnings("unchecked") + public void testMappingValidationIndexExists() { + /* + * This test simulates a BulkRequest of two documents into an existing index. Then we make sure the index contains no documents, and + * that the index's mapping in the cluster state has not been updated with the two new field. 
+ */ + String indexName = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + String mapping = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + } + } + } + } + """; + indicesAdmin().create(new CreateIndexRequest(indexName).mapping(mapping)).actionGet(); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo1": "baz" + } + """, XContentType.JSON).id(randomUUID())); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo3": "baz" + } + """, XContentType.JSON).id(randomUUID())); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + assertThat(response.getItems().length, equalTo(2)); + assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertNull(((SimulateIndexResponse) response.getItems()[0].getResponse()).getException()); + assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat( + ((SimulateIndexResponse) response.getItems()[1].getResponse()).getException().getMessage(), + containsString("mapping set to strict, dynamic introduction of") + ); + indicesAdmin().refresh(new RefreshRequest(indexName)).actionGet(); + SearchResponse searchResponse = client().search(new SearchRequest(indexName)).actionGet(); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + searchResponse.decRef(); + ClusterStateResponse clusterStateResponse = admin().cluster().state(new ClusterStateRequest()).actionGet(); + Map indexMapping = clusterStateResponse.getState().metadata().index(indexName).mapping().sourceAsMap(); + Map fields = (Map) indexMapping.get("properties"); + assertThat(fields.size(), equalTo(1)); + } + + public void testMappingValidationIndexDoesNotExistsNoTemplate() { + /* + * This test simulates a BulkRequest of two documents into an index that does not exist. 
There is no template (other than the + * mapping-less "random-index-template" created by the parent class), so we expect no mapping validation failure. + */ + String indexName = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo1": "baz" + } + """, XContentType.JSON).id(randomUUID())); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo3": "baz" + } + """, XContentType.JSON).id(randomUUID())); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + assertThat(response.getItems().length, equalTo(2)); + assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertNull(((SimulateIndexResponse) response.getItems()[0].getResponse()).getException()); + assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertNull(((SimulateIndexResponse) response.getItems()[1].getResponse()).getException()); + } + + public void testMappingValidationIndexDoesNotExistsV2Template() throws IOException { + /* + * This test simulates a BulkRequest of two documents into an index that does not exist. The index matches a v2 index template. It + * has strict mappings and one of our documents has it as a field not in the mapping, so we expect a mapping validation error. 
+ */ + String indexName = "my-index-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + String mappingString = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + } + } + } + } + """; + CompressedXContent mapping = CompressedXContent.fromJSON(mappingString); + Template template = new Template(Settings.EMPTY, mapping, null); + ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("my-index-*")) + .template(template) + .build(); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test"); + request.indexTemplate(composableIndexTemplate); + + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo1": "baz" + } + """, XContentType.JSON).id(randomUUID())); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo3": "baz" + } + """, XContentType.JSON).id(randomUUID())); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + assertThat(response.getItems().length, equalTo(2)); + assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertNull(((SimulateIndexResponse) response.getItems()[0].getResponse()).getException()); + assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat( + ((SimulateIndexResponse) response.getItems()[1].getResponse()).getException().getMessage(), + containsString("mapping set to strict, dynamic introduction of") + ); + } + + public void testMappingValidationIndexDoesNotExistsV1Template() { + /* + * This test simulates a BulkRequest of two documents into an index that does not exist. The index matches a v1 index template. 
It + * has a mapping that defines "foo1" as an integer field and one of our documents has it as a string, so we expect a mapping + * validation exception. + */ + String indexName = "my-index-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + indicesAdmin().putTemplate( + new PutIndexTemplateRequest("test-template").patterns(List.of("my-index-*")).mapping("foo1", "type=integer") + ).actionGet(); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo1": "baz" + } + """, XContentType.JSON).id(randomUUID())); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo3": "baz" + } + """, XContentType.JSON).id(randomUUID())); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + assertThat(response.getItems().length, equalTo(2)); + assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat( + ((SimulateIndexResponse) response.getItems()[0].getResponse()).getException().getMessage(), + containsString("failed to parse field [foo1] of type [integer] ") + ); + assertNull(((SimulateIndexResponse) response.getItems()[1].getResponse()).getException()); + assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + } + + public void testMappingValidationIndexDoesNotExistsDataStream() throws IOException { + /* + * This test simulates a BulkRequest of two documents into an index that does not exist. The index matches a v2 index template. It + * has strict mappings and one of our documents has it as a field not in the mapping, so we expect a mapping validation error. 
+ */ + String indexName = "my-data-stream-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + String mappingString = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + } + } + } + } + """; + CompressedXContent mapping = CompressedXContent.fromJSON(mappingString); + Template template = new Template(Settings.EMPTY, mapping, null); + ComposableIndexTemplate.DataStreamTemplate dataStreamTemplate = new ComposableIndexTemplate.DataStreamTemplate(); + ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("my-data-stream-*")) + .dataStreamTemplate(dataStreamTemplate) + .template(template) + .build(); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test"); + request.indexTemplate(composableIndexTemplate); + + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + { + // First, try with no @timestamp to make sure we're picking up data-stream-specific templates + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo1": "baz" + } + """, XContentType.JSON).id(randomUUID())); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "foo3": "baz" + } + """, XContentType.JSON).id(randomUUID())); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + assertThat(response.getItems().length, equalTo(2)); + assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat( + ((SimulateIndexResponse) response.getItems()[0].getResponse()).getException().getMessage(), + containsString("data stream timestamp field [@timestamp] is missing") + ); + assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat( + ((SimulateIndexResponse) 
response.getItems()[1].getResponse()).getException().getMessage(), + containsString("mapping set to strict, dynamic introduction of") + ); + } + { + // Now with @timestamp + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "@timestamp": "2024-08-27", + "foo1": "baz" + } + """, XContentType.JSON).id(randomUUID())); + bulkRequest.add(new IndexRequest(indexName).source(""" + { + "@timestamp": "2024-08-27", + "foo3": "baz" + } + """, XContentType.JSON).id(randomUUID())); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + assertThat(response.getItems().length, equalTo(2)); + assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertNull(((SimulateIndexResponse) response.getItems()[0].getResponse()).getException()); + assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat( + ((SimulateIndexResponse) response.getItems()[1].getResponse()).getException().getMessage(), + containsString("mapping set to strict, dynamic introduction of") + ); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 6fcaad47e0d72..cd8ffea3d3824 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -249,13 +250,20 @@ public static Template resolveTemplate( .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - // empty request mapping as the user can't specify any explicit mappings via the simulate api + /* + * If the index name doesn't look like a data stream backing index, then MetadataCreateIndexService.collectV2Mappings() won't + * include data stream specific mappings in its response. + */ + String simulatedIndexName = template.getDataStreamTemplate() != null + && indexName.startsWith(DataStream.BACKING_INDEX_PREFIX) == false + ? DataStream.getDefaultBackingIndexName(indexName, 1) + : indexName; List mappings = MetadataCreateIndexService.collectV2Mappings( - null, + null, // empty request mapping as the user can't specify any explicit mappings via the simulate api simulatedState, matchingTemplate, xContentRegistry, - indexName + simulatedIndexName ); // First apply settings sourced from index settings providers @@ -303,7 +311,9 @@ public static Template resolveTemplate( ) ); - Map aliasesByName = aliases.stream().collect(Collectors.toMap(AliasMetadata::getAlias, Function.identity())); + Map aliasesByName = aliases == null + ? 
Map.of() + : aliases.stream().collect(Collectors.toMap(AliasMetadata::getAlias, Function.identity())); CompressedXContent mergedMapping = indicesService.withTempIndexService( indexMetadata, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java index 99c2d994a8bd0..b8dd0d1fe415e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java @@ -14,9 +14,10 @@ import java.util.Set; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION; +import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES; public class BulkFeatures implements FeatureSpecification { public Set getFeatures() { - return Set.of(SIMULATE_MAPPING_VALIDATION); + return Set.of(SIMULATE_MAPPING_VALIDATION, SIMULATE_MAPPING_VALIDATION_TEMPLATES); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 2312a75b91084..8da6fb409cb90 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -10,16 +10,27 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.template.post.TransportSimulateIndexTemplateAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import 
org.elasticsearch.cluster.metadata.IndexTemplateMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.index.IndexSettingProviders; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; @@ -35,9 +46,18 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV1Templates; +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV2Template; + /** * This action simulates bulk indexing data. Pipelines are executed for all indices that the request routes to, but no data is actually * indexed and no state is changed. 
Unlike TransportBulkAction, this does not push the work out to the nodes where the shards live (since @@ -45,7 +65,10 @@ */ public class TransportSimulateBulkAction extends TransportAbstractBulkAction { public static final NodeFeature SIMULATE_MAPPING_VALIDATION = new NodeFeature("simulate.mapping.validation"); + public static final NodeFeature SIMULATE_MAPPING_VALIDATION_TEMPLATES = new NodeFeature("simulate.mapping.validation.templates"); private final IndicesService indicesService; + private final NamedXContentRegistry xContentRegistry; + private final Set indexSettingProviders; @Inject public TransportSimulateBulkAction( @@ -56,7 +79,9 @@ public TransportSimulateBulkAction( ActionFilters actionFilters, IndexingPressure indexingPressure, SystemIndices systemIndices, - IndicesService indicesService + IndicesService indicesService, + NamedXContentRegistry xContentRegistry, + IndexSettingProviders indexSettingProviders ) { super( SimulateBulkAction.INSTANCE, @@ -71,6 +96,8 @@ public TransportSimulateBulkAction( threadPool::relativeTimeInNanos ); this.indicesService = indicesService; + this.xContentRegistry = xContentRegistry; + this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); } @Override @@ -128,9 +155,9 @@ private Exception validateMappings(IndexRequest request) { ClusterState state = clusterService.state(); Exception mappingValidationException = null; IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(request.index()); - if (indexAbstraction != null) { - IndexMetadata imd = state.metadata().getIndexSafe(indexAbstraction.getWriteIndex(request, state.metadata())); - try { + try { + if (indexAbstraction != null) { + IndexMetadata imd = state.metadata().getIndexSafe(indexAbstraction.getWriteIndex(request, state.metadata())); indicesService.withTempIndexService(imd, indexService -> { indexService.mapperService().updateMapping(null, imd); return IndexShard.prepareIndex( @@ -148,9 +175,102 @@ private Exception 
validateMappings(IndexRequest request) { 0 ); }); - } catch (Exception e) { - mappingValidationException = e; + } else { + /* + * The index did not exist, so we put together the mappings from existing templates. + * This reproduces a lot of the mapping resolution logic in MetadataCreateIndexService.applyCreateIndexRequest(). However, + * it does not deal with aliases (since an alias cannot be created if an index does not exist, and this is the path for + * when the index does not exist). And it does not deal with system indices since we do not intend for users to simulate + * writing to system indices. + */ + String matchingTemplate = findV2Template(state.metadata(), request.index(), false); + if (matchingTemplate != null) { + final Template template = TransportSimulateIndexTemplateAction.resolveTemplate( + matchingTemplate, + request.index(), + state, + isDataStreamsLifecycleOnlyMode(clusterService.getSettings()), + xContentRegistry, + indicesService, + systemIndices, + indexSettingProviders + ); + CompressedXContent mappings = template.mappings(); + if (mappings != null) { + MappingMetadata mappingMetadata = new MappingMetadata(mappings); + Settings dummySettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + final IndexMetadata imd = IndexMetadata.builder(request.index()) + .settings(dummySettings) + .putMapping(mappingMetadata) + .build(); + indicesService.withTempIndexService(imd, indexService -> { + indexService.mapperService().updateMapping(null, imd); + return IndexShard.prepareIndex( + indexService.mapperService(), + sourceToParse, + SequenceNumbers.UNASSIGNED_SEQ_NO, + -1, + -1, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + Long.MIN_VALUE, + false, + request.ifSeqNo(), + request.ifPrimaryTerm(), + 0 + ); + }); + } + } 
else { + List matchingTemplates = findV1Templates(state.metadata(), request.index(), false); + final Map mappingsMap = MetadataCreateIndexService.parseV1Mappings( + "{}", + matchingTemplates.stream().map(IndexTemplateMetadata::getMappings).collect(toList()), + xContentRegistry + ); + final CompressedXContent combinedMappings; + if (mappingsMap.isEmpty()) { + combinedMappings = null; + } else { + combinedMappings = new CompressedXContent(mappingsMap); + } + Settings dummySettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + MappingMetadata mappingMetadata = combinedMappings == null ? null : new MappingMetadata(combinedMappings); + final IndexMetadata imd = IndexMetadata.builder(request.index()) + .putMapping(mappingMetadata) + .settings(dummySettings) + .build(); + indicesService.withTempIndexService(imd, indexService -> { + indexService.mapperService().updateMapping(null, imd); + return IndexShard.prepareIndex( + indexService.mapperService(), + sourceToParse, + SequenceNumbers.UNASSIGNED_SEQ_NO, + -1, + -1, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + Long.MIN_VALUE, + false, + request.ifSeqNo(), + request.ifPrimaryTerm(), + 0 + ); + }); + } } + } catch (Exception e) { + mappingValidationException = e; } return mappingValidationException; } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java index 445492f037926..258cd5ceaa8e7 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java @@ -96,6 +96,10 @@ public void writeTo(StreamOutput out) throws IOException { } } + public Exception 
getException() { + return this.exception; + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 02b7312b4a99d..17db4f9253824 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -902,7 +902,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( * {@link IndexTemplateMetadata#order()}). This merging makes no distinction between field * definitions, as may result in an invalid field definition */ - static Map parseV1Mappings( + public static Map parseV1Mappings( String mappingsJson, List templateMappings, NamedXContentRegistry xContentRegistry diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java index 8d4017a756e48..3a066ab85147d 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -25,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.IndexingPressure; @@ -39,6 +42,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; @@ -49,6 +53,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -85,7 +90,9 @@ class TestTransportSimulateBulkAction extends TransportSimulateBulkAction { new ActionFilters(Set.of()), new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, - indicesService + indicesService, + NamedXContentRegistry.EMPTY, + new IndexSettingProviders(Set.of()) ); } } @@ -198,17 +205,23 @@ public void onFailure(Exception e) { public void testIndexDataWithValidation() throws IOException { /* - * This test makes sure that we validate mappings if we're indexing into an index that exists. It simulates 3 cases: - * (1) An indexing request to a nonexistent index (the index is not in the cluster state) - * (2) An indexing request to an index with non-strict mappings, or an index request that is valid with respect to the mappings + * This test makes sure that we validate mappings. 
It simulates 7 cases: + * (1) An indexing request to an index with non-strict mappings, or an index request that is valid with respect to the mappings * (the index is in the cluster state, but our mock indicesService.withTempIndexService() does not throw an exception) - * (3) An indexing request that is invalid with respect to the mappings (the index is in the cluster state, and our mock + * (2) An indexing request that is invalid with respect to the mappings (the index is in the cluster state, and our mock * indicesService.withTempIndexService() throws an exception) + * (3) An indexing request to a nonexistent index that matches a V1 template and is valid with respect to the mappings + * (4) An indexing request to a nonexistent index that matches a V1 template and is invalid with respect to the mappings + * (5) An indexing request to a nonexistent index that matches a V2 template and is valid with respect to the mappings + * (6) An indexing request to a nonexistent index that matches a V2 template and is invalid with respect to the mappings + * (7) An indexing request to a nonexistent index that matches no templates */ Task task = mock(Task.class); // unused BulkRequest bulkRequest = new SimulateBulkRequest((Map>) null); int bulkItemCount = randomIntBetween(0, 200); Map indicesMap = new HashMap<>(); + Map v1Templates = new HashMap<>(); + Map v2Templates = new HashMap<>(); Metadata.Builder metadataBuilder = new Metadata.Builder(); Set indicesWithInvalidMappings = new HashSet<>(); for (int i = 0; i < bulkItemCount; i++) { @@ -221,23 +234,43 @@ public void testIndexDataWithValidation() throws IOException { bulkRequest.add(indexRequest); // Now we randomly decide what we're going to simulate with requests to this index: String indexName = indexRequest.index(); - switch (between(0, 2)) { + switch (between(0, 6)) { case 0 -> { - // Index does not exist, so we don't put it in the indicesMap + // Indices that have non-strict mappings, or we're sending valid requests for 
their mappings + indicesMap.put(indexName, newIndexMetadata(indexName)); } case 1 -> { - // Indices that have non-strict mappings, or we're sending valid requests for their mappings + // // Indices that we'll pretend to have sent invalid requests to + indicesWithInvalidMappings.add(indexName); indicesMap.put(indexName, newIndexMetadata(indexName)); } case 2 -> { - // Indices that we'll pretend to have sent invalid requests to + // Index does not exist, but matches a V1 template + v1Templates.put(indexName, newV1Template(indexName)); + } + case 3 -> { + // Index does not exist, but matches a V1 template + v1Templates.put(indexName, newV1Template(indexName)); indicesWithInvalidMappings.add(indexName); - indicesMap.put(indexName, newIndexMetadata(indexName)); + } + case 4 -> { + // Index does not exist, but matches a V2 template + v2Templates.put(indexName, newV2Template(indexName)); + } + case 5 -> { + // Index does not exist, but matches a V2 template + v2Templates.put(indexName, newV2Template(indexName)); + indicesWithInvalidMappings.add(indexName); + } + case 6 -> { + // Index does not exist, and matches no template } default -> throw new AssertionError("Illegal branch"); } } metadataBuilder.indices(indicesMap); + metadataBuilder.templates(v1Templates); + metadataBuilder.indexTemplates(v2Templates); ClusterServiceUtils.setState(clusterService, new ClusterState.Builder(clusterService.state()).metadata(metadataBuilder)); AtomicBoolean onResponseCalled = new AtomicBoolean(false); ActionListener listener = new ActionListener<>() { @@ -324,7 +357,7 @@ public void onFailure(Exception e) { fail(e, "Unexpected error"); } }; - when(indicesService.withTempIndexService(any(), any())).thenAnswer((Answer) invocation -> { + when(indicesService.withTempIndexService(any(), any())).thenAnswer((Answer) invocation -> { IndexMetadata imd = invocation.getArgument(0); if (indicesWithInvalidMappings.contains(imd.getIndex().getName())) { throw new ElasticsearchException("invalid 
mapping"); @@ -347,6 +380,14 @@ private IndexMetadata newIndexMetadata(String indexName) { return new IndexMetadata.Builder(indexName).settings(dummyIndexSettings).build(); } + private IndexTemplateMetadata newV1Template(String indexName) { + return new IndexTemplateMetadata.Builder(indexName).patterns(List.of(indexName)).build(); + } + + private ComposableIndexTemplate newV2Template(String indexName) { + return ComposableIndexTemplate.builder().indexPatterns(List.of(indexName)).build(); + } + private String convertMapToJsonString(Map map) throws IOException { try (XContentBuilder builder = JsonXContent.contentBuilder().map(map)) { return BytesReference.bytes(builder).utf8ToString(); From a4dba7db8dc745e207f7c0693d080c1150fbf7cc Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:19:19 -0600 Subject: [PATCH 067/115] (Doc+) Sparse Vectors NA to mapping analyzers (#112523) * retry --- docs/reference/mapping/types/sparse-vector.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index d0c2c83b8a8fa..b24f65fcf97ca 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -91,7 +91,7 @@ NOTE: `sparse_vector` fields can not be included in indices that were *created* NOTE: `sparse_vector` fields only support strictly positive values. Negative values will be rejected. -NOTE: `sparse_vector` fields do not support querying, sorting or aggregating. +NOTE: `sparse_vector` fields do not support <>, querying, sorting or aggregating. They may only be used within specialized queries. The recommended query to use on these fields are <> queries. They may also be used within legacy <> queries. 
From 24f33e95e8af11fa34113e05e684b8235586061c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 5 Sep 2024 08:22:48 -0700 Subject: [PATCH 068/115] Ensure rest compatibility tests are run when appropriate (#112526) --- .../LegacyYamlRestCompatTestPluginFuncTest.groovy | 5 ++--- .../main/groovy/elasticsearch.build-scan.gradle | 2 +- .../compat/AbstractYamlRestCompatTestPlugin.java | 14 +++++++++++++- modules/aggregations/build.gradle | 3 +++ modules/data-streams/build.gradle | 7 ++----- modules/ingest-common/build.gradle | 4 +++- modules/repository-url/build.gradle | 5 +++++ 7 files changed, 29 insertions(+), 11 deletions(-) diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy index b7c4908e39b62..737c448f23be6 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy @@ -55,8 +55,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest", '--stacktrace').build() then: - // we set the task to be skipped if there are no matching tests in the compatibility test sourceSet - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE @@ -165,7 +164,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends 
AbstractRestResourcesFuncTe then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index a6dae60ddd524..d604973efcb4b 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -41,7 +41,7 @@ develocity { if (BuildParams.inFipsJvm) { tag 'FIPS' } - println "onCI = $onCI" + if (onCI) { //Buildkite-specific build scan metadata String buildKiteUrl = System.getenv('BUILDKITE_BUILD_URL') def branch = System.getenv('BUILDKITE_PULL_REQUEST_BASE_BRANCH') ?: System.getenv('BUILDKITE_BRANCH') diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java index c6320394ef5b9..e0581ebf67081 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java @@ -35,6 +35,7 @@ import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.testing.Test; +import org.gradle.language.jvm.tasks.ProcessResources; import 
java.io.File; import java.nio.file.Path; @@ -213,6 +214,17 @@ public void apply(Project project) { .named(RestResourcesPlugin.COPY_YAML_TESTS_TASK) .flatMap(CopyRestTestsTask::getOutputResourceDir); + // ensure we include other non rest spec related test resources + project.getTasks() + .withType(ProcessResources.class) + .named(yamlCompatTestSourceSet.getProcessResourcesTaskName()) + .configure(processResources -> { + processResources.from( + sourceSets.getByName(YamlRestTestPlugin.YAML_REST_TEST).getResources(), + spec -> { spec.exclude("rest-api-spec/**"); } + ); + }); + // setup the test task TaskProvider yamlRestCompatTestTask = registerTestTask(project, yamlCompatTestSourceSet); yamlRestCompatTestTask.configure(testTask -> { @@ -221,7 +233,7 @@ public void apply(Project project) { testTask.setTestClassesDirs( yamlTestSourceSet.getOutput().getClassesDirs().plus(yamlCompatTestSourceSet.getOutput().getClassesDirs()) ); - testTask.onlyIf("Compatibility tests are available", t -> yamlCompatTestSourceSet.getAllSource().isEmpty() == false); + testTask.onlyIf("Compatibility tests are available", t -> yamlCompatTestSourceSet.getOutput().isEmpty() == false); testTask.setClasspath( yamlCompatTestSourceSet.getRuntimeClasspath() // remove the "normal" api and tests diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index a773c751eeaf5..91f3303d9d4a8 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -54,6 +54,9 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Filtered test", "Hybrid t-digest produces different results.") task.skipTest("search.aggregation/420_percentile_ranks_tdigest_metric/filtered", "Hybrid t-digest produces different results.") + // Something has changed with response codes + task.skipTest("search.aggregation/20_terms/IP test", "Hybrid t-digest produces different results.") + 
task.addAllowedWarningRegex("\\[types removal\\].*") } diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index a0375c61d7c29..daf0c188cc83e 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -1,4 +1,5 @@ import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.test-with-dependencies' apply plugin: 'elasticsearch.internal-cluster-test' @@ -23,11 +24,7 @@ dependencies { internalClusterTestImplementation project(":modules:mapper-extras") } -tasks.named('yamlRestTest') { - usesDefaultDistribution() -} - -tasks.named('javaRestTest') { +tasks.withType(StandaloneRestIntegTestTask).configureEach { usesDefaultDistribution() } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 90d52de6f0fff..d7100745680ba 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -5,6 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask + apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -29,7 +31,7 @@ restResources { } } -tasks.named('yamlRestTest') { +tasks.withType(StandaloneRestIntegTestTask).configureEach { usesDefaultDistribution() } diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 3537d430e212b..3fe2f9d9bae42 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -33,6 +33,11 @@ dependencies { internalClusterTestImplementation project(':test:fixtures:url-fixture') } +tasks.named("yamlRestTestV7CompatTransform").configure { task -> + task.skipTest("repository_url/10_basic/Restore with repository-url using file://", "Error message has changed") + task.skipTest("repository_url/10_basic/Restore with repository-url using http://", "Error message has changed") +} + tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( 'javax.servlet.ServletContextEvent', From 01fb50142e9c8ad0a2de13a2f0311c9c50604b57 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 5 Sep 2024 17:34:34 +0200 Subject: [PATCH 069/115] Speedup HealthNodeTaskExecutor (#112558) The introduction of this class introduced a significant regression in cluster state update performance and increased test execution times visibly. The `clusterHasFeature` check is very expensive, lets do it laster and do the effectively free checks first. 
--- .../selection/HealthNodeTaskExecutor.java | 45 +++++++++---------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index cc908cd7cad2c..42a2854350fdb 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -157,30 +157,29 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( // visible for testing void startTask(ClusterChangedEvent event) { // Wait until every node in the cluster supports health checks - if (event.state().clusterRecovered() && featureService.clusterHasFeature(event.state(), HealthFeatures.SUPPORTS_HEALTH)) { - boolean healthNodeTaskExists = HealthNode.findTask(event.state()) != null; - boolean isElectedMaster = event.localNodeMaster(); - if (isElectedMaster && healthNodeTaskExists == false) { - persistentTasksService.sendStartRequest( - TASK_NAME, - TASK_NAME, - new HealthNodeTaskParams(), - null, - ActionListener.wrap(r -> logger.debug("Created the health node task"), e -> { - if (e instanceof NodeClosedException) { - logger.debug("Failed to create health node task because node is shutting down", e); - return; + if (event.localNodeMaster() + && event.state().clusterRecovered() + && HealthNode.findTask(event.state()) == null + && featureService.clusterHasFeature(event.state(), HealthFeatures.SUPPORTS_HEALTH)) { + persistentTasksService.sendStartRequest( + TASK_NAME, + TASK_NAME, + new HealthNodeTaskParams(), + null, + ActionListener.wrap(r -> logger.debug("Created the health node task"), e -> { + if (e instanceof NodeClosedException) { + logger.debug("Failed to create health node task because node is shutting down", e); + return; + } + Throwable t = e instanceof RemoteTransportException ? 
e.getCause() : e; + if (t instanceof ResourceAlreadyExistsException == false) { + logger.error("Failed to create the health node task", e); + if (enabled) { + clusterService.addListener(taskStarter); } - Throwable t = e instanceof RemoteTransportException ? e.getCause() : e; - if (t instanceof ResourceAlreadyExistsException == false) { - logger.error("Failed to create the health node task", e); - if (enabled) { - clusterService.addListener(taskStarter); - } - } - }) - ); - } + } + }) + ); } } From e2a1605e4b1b57787876e4baaaec956da6cb289f Mon Sep 17 00:00:00 2001 From: mohamedhamed-ahmed Date: Thu, 5 Sep 2024 16:38:29 +0100 Subject: [PATCH 070/115] Update resolve index missing type definition (#112509) --- .../rest-api-spec/api/indices.resolve_index.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.resolve_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.resolve_index.json index 4ea78bfd45460..e27e3a0450bff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.resolve_index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.resolve_index.json @@ -37,6 +37,16 @@ ], "default":"open", "description":"Whether wildcard expressions should get expanded to open or closed indices (default: open)" + }, + "ignore_unavailable":{ + "type":"boolean", + "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)", + "default":false + }, + "allow_no_indices":{ + "type":"boolean", + "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)", + "default":true } } } From c11825429c7e255a961299c26558f8c4cb52903c Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Thu, 5 Sep 2024 09:53:16 -0600 Subject: [PATCH 071/115] Fix handling telemetry on compound retrievers branch (#112309) --- .../search/ccs/CCSUsageTelemetryIT.java | 17 ++ .../retriever/MinimalCompoundRetrieverIT.java | 198 ++++++++++++++++++ .../action/search/TransportSearchAction.java | 19 +- 3 files changed, 229 insertions(+), 5 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index bb18b8f1b702d..8b7f69df9fcc3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -32,6 +32,8 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.query.SlowRunningQueryBuilder; import org.elasticsearch.search.query.ThrowingQueryBuilder; +import org.elasticsearch.search.retriever.MinimalCompoundRetrieverIT; +import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -49,6 +51,7 @@ import java.lang.annotation.Target; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -620,6 +623,20 @@ public void testPITSearch() throws ExecutionException, InterruptedException { assertThat(telemetry.getSuccessCount(), equalTo(2L)); } + public void testCompoundRetrieverSearch() throws 
ExecutionException, InterruptedException { + RetrieverBuilder compoundRetriever = new MinimalCompoundRetrieverIT.CompoundRetriever(Collections.emptyList()); + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + searchRequest.source(new SearchSourceBuilder().retriever(compoundRetriever)); + + CCSTelemetrySnapshot telemetry = getTelemetryFromSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + } + private CCSTelemetrySnapshot getTelemetrySnapshot(String nodeName) { var usage = cluster(LOCAL_CLUSTER).getInstance(UsageService.class, nodeName); return usage.getCcsUsageHolder().getCCSTelemetrySnapshot(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java new file mode 100644 index 0000000000000..8c65d28711c1b --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.hamcrest.Matchers.equalTo; + +public class MinimalCompoundRetrieverIT extends AbstractMultiClustersTestCase { + + // CrossClusterSearchIT + private static final String REMOTE_CLUSTER = "cluster_a"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Map skipUnavailableForRemoteClusters() { + return Map.of(REMOTE_CLUSTER, randomBoolean()); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + public void testSimpleSearch() throws ExecutionException, InterruptedException { + RetrieverBuilder compoundRetriever = new CompoundRetriever(Collections.emptyList()); + Map testClusterInfo = setupTwoClusters(); + String localIndex = (String) 
testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); + searchRequest.source(new SearchSourceBuilder().retriever(compoundRetriever)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + SearchResponse.Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + assertThat(response.getHits().getTotalHits().value, equalTo(testClusterInfo.get("total_docs"))); + }); + } + + private Map setupTwoClusters() { + int totalDocs = 0; + String localIndex = "demo"; + int numShardsLocal = randomIntBetween(2, 10); + Settings localSettings = indexSettings(numShardsLocal, randomIntBetween(0, 1)).build(); + assertAcked( + client(LOCAL_CLUSTER).admin() + .indices() + .prepareCreate(localIndex) + .setSettings(localSettings) + .setMapping("some_field", "type=keyword") + ); + totalDocs += indexDocs(client(LOCAL_CLUSTER), localIndex); + + String remoteIndex = "prod"; + int numShardsRemote = randomIntBetween(2, 10); + final InternalTestCluster remoteCluster = cluster(REMOTE_CLUSTER); + remoteCluster.ensureAtLeastNumDataNodes(randomIntBetween(1, 3)); + assertAcked( + client(REMOTE_CLUSTER).admin() + .indices() + .prepareCreate(remoteIndex) + 
.setSettings(indexSettings(numShardsRemote, randomIntBetween(0, 1))) + .setMapping("some_field", "type=keyword") + ); + assertFalse( + client(REMOTE_CLUSTER).admin() + .cluster() + .prepareHealth(remoteIndex) + .setWaitForYellowStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .get() + .isTimedOut() + ); + totalDocs += indexDocs(client(REMOTE_CLUSTER), remoteIndex); + + String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER); + Setting skipUnavailableSetting = cluster(REMOTE_CLUSTER).clusterService().getClusterSettings().get(skipUnavailableKey); + boolean skipUnavailable = (boolean) cluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).clusterService() + .getClusterSettings() + .get(skipUnavailableSetting); + + Map clusterInfo = new HashMap<>(); + clusterInfo.put("local.num_shards", numShardsLocal); + clusterInfo.put("local.index", localIndex); + clusterInfo.put("remote.num_shards", numShardsRemote); + clusterInfo.put("remote.index", remoteIndex); + clusterInfo.put("remote.skip_unavailable", skipUnavailable); + clusterInfo.put("total_docs", (long) totalDocs); + return clusterInfo; + } + + private int indexDocs(Client client, String index) { + int numDocs = between(500, 1200); + for (int i = 0; i < numDocs; i++) { + client.prepareIndex(index).setSource("some_field", i).get(); + } + client.admin().indices().prepareRefresh(index).get(); + return numDocs; + } + + public static class CompoundRetriever extends RetrieverBuilder { + + private final List sources; + + public CompoundRetriever(List sources) { + this.sources = sources; + } + + @Override + public boolean isCompound() { + return true; + } + + @Override + public QueryBuilder topDocsQuery() { + throw new UnsupportedOperationException("should not be called"); + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (ctx.getPointInTimeBuilder() == null) { + throw new IllegalStateException("PIT is required"); + } + if 
(sources.isEmpty()) { + StandardRetrieverBuilder standardRetrieverBuilder = new StandardRetrieverBuilder(); + standardRetrieverBuilder.queryBuilder = new MatchAllQueryBuilder(); + return standardRetrieverBuilder; + } + return sources.get(0); + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + throw new UnsupportedOperationException("should not be called"); + } + + @Override + public String getName() { + return "compound_retriever"; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + // no-op + } + + @Override + protected boolean doEquals(Object o) { + return false; + } + + @Override + protected int doHashCode() { + return 0; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index e29b07eeffe11..23ff692da4887 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -312,8 +312,12 @@ public long buildTookInMillis() { @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { - ActionListener loggingAndMetrics = new SearchResponseActionListener((SearchTask) task, listener); - executeRequest((SearchTask) task, searchRequest, loggingAndMetrics, AsyncSearchActionProvider::new); + executeRequest( + (SearchTask) task, + searchRequest, + new SearchResponseActionListener((SearchTask) task, listener), + AsyncSearchActionProvider::new + ); } void executeRequest( @@ -498,7 +502,7 @@ void executeRequest( // We set the keep alive to -1 to indicate that we don't need the pit id in the response. // This is needed since we delete the pit prior to sending the response so the id doesn't exist anymore. 
source.pointInTimeBuilder(new PointInTimeBuilder(resp.getPointInTimeId()).setKeepAlive(TimeValue.MINUS_ONE)); - executeRequest(task, original, new ActionListener<>() { + var pitListener = new SearchResponseActionListener(task, listener) { @Override public void onResponse(SearchResponse response) { // we need to close the PIT first so we delay the release of the response to after the closing @@ -514,7 +518,8 @@ public void onResponse(SearchResponse response) { public void onFailure(Exception e) { closePIT(client, original.source().pointInTimeBuilder(), () -> listener.onFailure(e)); } - }, searchPhaseProvider); + }; + executeRequest(task, original, pitListener, searchPhaseProvider); })); } else { Rewriteable.rewriteAndFetch( @@ -1846,7 +1851,11 @@ private class SearchResponseActionListener implements ActionListener listener) { this.task = task; this.listener = listener; - usageBuilder = new CCSUsage.Builder(); + if (listener instanceof SearchResponseActionListener srListener) { + usageBuilder = srListener.usageBuilder; + } else { + usageBuilder = new CCSUsage.Builder(); + } } /** From 550af4fac41496057ce9c4ac4fe255ea56e5b12e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 5 Sep 2024 17:54:23 +0200 Subject: [PATCH 072/115] Stop rebuilding ClusterFeatures on every CS update (#112559) If the node-features maps are equal, then the instances are equal and there's no point in rebuilding which entails rebuilding the costly `allNodeFeatures` field in them eventually. 
--- .../src/main/java/org/elasticsearch/cluster/ClusterState.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 30e9a9a3779d7..02d5bdfdbebc0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -984,7 +984,9 @@ public ClusterState build() { routingTable, nodes, compatibilityVersions, - new ClusterFeatures(nodeFeatures), + previous != null && getNodeFeatures(previous.clusterFeatures).equals(nodeFeatures) + ? previous.clusterFeatures + : new ClusterFeatures(nodeFeatures), blocks, customs.build(), fromDiff, From 7800e99c865d46a736cc746b8f49221842778563 Mon Sep 17 00:00:00 2001 From: Maxim Kholod Date: Thu, 5 Sep 2024 18:10:43 +0200 Subject: [PATCH 073/115] add priviliges required for cdr misconfiguration features to work (#112456) --- .../KibanaOwnedReservedRoleDescriptors.java | 16 ++++++++++++---- .../authz/store/ReservedRolesStoreTests.java | 10 +++++++--- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 36d0240ed765b..6529d4d18fa5d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -394,12 +394,14 @@ static RoleDescriptor kibanaSystem(String name) { TransportUpdateSettingsAction.TYPE.name() ) .build(), - // For src/dest indices of the Cloud Security Posture packages that ships a + // For source indices of the Cloud Security Posture 
packages that ships a // transform RoleDescriptor.IndicesPrivileges.builder() .indices("logs-cloud_security_posture.findings-*", "logs-cloud_security_posture.vulnerabilities-*") .privileges("read", "view_index_metadata") .build(), + // For destination indices of the Cloud Security Posture packages that ships a + // transform RoleDescriptor.IndicesPrivileges.builder() .indices( "logs-cloud_security_posture.findings_latest-default*", @@ -415,17 +417,23 @@ static RoleDescriptor kibanaSystem(String name) { TransportUpdateSettingsAction.TYPE.name() ) .build(), + // For source indices of the Cloud Detection & Response (CDR) packages that ships a + // transform RoleDescriptor.IndicesPrivileges.builder() - .indices("logs-wiz.vulnerability-*") + .indices("logs-wiz.vulnerability-*", "logs-wiz.cloud_configuration_finding-*") .privileges("read", "view_index_metadata") .build(), + // For alias indices of the Cloud Detection & Response (CDR) packages that ships a + // transform RoleDescriptor.IndicesPrivileges.builder() // manage privilege required by the index alias - .indices("security_solution-*.vulnerability_latest") + .indices("security_solution-*.vulnerability_latest", "security_solution-*.misconfiguration_latest") .privileges("manage", TransportIndicesAliasesAction.NAME, TransportUpdateSettingsAction.TYPE.name()) .build(), + // For destination indices of the Cloud Detection & Response (CDR) packages that ships a + // transform RoleDescriptor.IndicesPrivileges.builder() - .indices("security_solution-*.vulnerability_latest-*") + .indices("security_solution-*.vulnerability_latest-*", "security_solution-*.misconfiguration_latest-*") .privileges( "create_index", "index", diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 258b2378b8a1c..be4042ae77838 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1609,8 +1609,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); - Arrays.asList("logs-wiz.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((cspIndex) -> { - final IndexAbstraction indexAbstraction = mockIndexAbstraction(cspIndex); + Arrays.asList( + "logs-wiz.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-wiz.cloud_configuration_finding-" + randomAlphaOfLength(randomIntBetween(0, 13)) + ).forEach(indexName -> { + final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(indexAbstraction), is(false)); assertThat( @@ -1643,7 +1646,8 @@ public void testKibanaSystemRole() { "logs-cloud_security_posture.findings_latest-default-" + Version.CURRENT, "logs-cloud_security_posture.scores-default-" + Version.CURRENT, "logs-cloud_security_posture.vulnerabilities_latest-default" + Version.CURRENT, - "security_solution-*.vulnerability_latest-" + Version.CURRENT + "security_solution-*.vulnerability_latest-" + Version.CURRENT, + "security_solution-*.misconfiguration_latest-" + Version.CURRENT ).forEach(indexName -> { logger.info("index name [{}]", indexName); final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName); From 192d61dccddc6991fea7c4e84c90b3191abce5ec Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 5 Sep 2024 18:13:17 +0000 Subject: [PATCH 074/115] Bump versions after 8.15.1 release --- .buildkite/pipelines/intake.yml | 2 +- 
.buildkite/pipelines/periodic-packaging.yml | 6 +++--- .buildkite/pipelines/periodic.yml | 10 +++++----- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/Version.java | 1 + .../resources/org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index bb3c75f10aaea..beb45107bc313 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 12729a9b6ebda..cd0bc8449f89e 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -594,8 +594,8 @@ steps: env: BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.1 + - label: "{{matrix.image}} / 8.15.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.2 timeout_in_minutes: 300 matrix: setup: @@ -609,7 +609,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 8.15.1 + BWC_VERSION: 8.15.2 - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 740fec13d1790..8f25a0fb11065 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -662,8 +662,8 @@ steps: - 
signal_reason: agent_stop limit: 3 - - label: 8.15.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.1#bwcTest + - label: 8.15.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.2#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -673,7 +673,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 8.15.1 + BWC_VERSION: 8.15.2 retry: automatic: - exit_status: "-1" @@ -771,7 +771,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -821,7 +821,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index e43b3333dd755..b80309cdb3f8e 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -32,5 +32,5 @@ BWC_VERSION: - "8.12.2" - "8.13.4" - "8.14.3" - - "8.15.1" + - "8.15.2" - "8.16.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 2eea118e57e2a..e41bbac68f1ec 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - "7.17.24" - - "8.15.1" + - "8.15.2" - "8.16.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b751daf0e2d98..54b6b1ef9c8c8 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -182,6 +182,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_14_3 = new Version(8_14_03_99); public static final Version V_8_15_0 = new Version(8_15_00_99); public static final Version V_8_15_1 = new Version(8_15_01_99); + public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version 
V_8_16_0 = new Version(8_16_00_99); public static final Version CURRENT = V_8_16_0; diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 8cd6fe9720039..88bf3232a2b17 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -127,3 +127,4 @@ 8.14.2,8636001 8.14.3,8636001 8.15.0,8702002 +8.15.1,8702002 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index df0df2d05ba5b..f89bbb5712634 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -127,3 +127,4 @@ 8.14.2,8505000 8.14.3,8505000 8.15.0,8512000 +8.15.1,8512000 From b6f8f4c2cc8bccfdff961ffa86f56cd40b15be99 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 5 Sep 2024 18:14:13 +0000 Subject: [PATCH 075/115] Prune changelogs after 8.15.1 release --- docs/changelog/111285.yaml | 5 ----- docs/changelog/111475.yaml | 6 ------ docs/changelog/111673.yaml | 5 ----- docs/changelog/111729.yaml | 6 ------ docs/changelog/111756.yaml | 6 ------ docs/changelog/111758.yaml | 6 ------ docs/changelog/111807.yaml | 5 ----- docs/changelog/111843.yaml | 5 ----- docs/changelog/111863.yaml | 6 ------ docs/changelog/111866.yaml | 6 ------ docs/changelog/111943.yaml | 6 ------ docs/changelog/111947.yaml | 5 ----- docs/changelog/111966.yaml | 5 ----- docs/changelog/111983.yaml | 6 ------ docs/changelog/111994.yaml | 6 ------ docs/changelog/112005.yaml | 6 ------ docs/changelog/112038.yaml | 6 ------ docs/changelog/112046.yaml | 5 ----- docs/changelog/112090.yaml | 6 ------ docs/changelog/112135.yaml | 4 ---- docs/changelog/112139.yaml | 6 ------ docs/changelog/112173.yaml | 7 ------- docs/changelog/112178.yaml | 6 ------ 
docs/changelog/112217.yaml | 5 ----- docs/changelog/112226.yaml | 6 ------ docs/changelog/112230.yaml | 5 ----- docs/changelog/112242.yaml | 5 ----- docs/changelog/112260.yaml | 6 ------ 28 files changed, 157 deletions(-) delete mode 100644 docs/changelog/111285.yaml delete mode 100644 docs/changelog/111475.yaml delete mode 100644 docs/changelog/111673.yaml delete mode 100644 docs/changelog/111729.yaml delete mode 100644 docs/changelog/111756.yaml delete mode 100644 docs/changelog/111758.yaml delete mode 100644 docs/changelog/111807.yaml delete mode 100644 docs/changelog/111843.yaml delete mode 100644 docs/changelog/111863.yaml delete mode 100644 docs/changelog/111866.yaml delete mode 100644 docs/changelog/111943.yaml delete mode 100644 docs/changelog/111947.yaml delete mode 100644 docs/changelog/111966.yaml delete mode 100644 docs/changelog/111983.yaml delete mode 100644 docs/changelog/111994.yaml delete mode 100644 docs/changelog/112005.yaml delete mode 100644 docs/changelog/112038.yaml delete mode 100644 docs/changelog/112046.yaml delete mode 100644 docs/changelog/112090.yaml delete mode 100644 docs/changelog/112135.yaml delete mode 100644 docs/changelog/112139.yaml delete mode 100644 docs/changelog/112173.yaml delete mode 100644 docs/changelog/112178.yaml delete mode 100644 docs/changelog/112217.yaml delete mode 100644 docs/changelog/112226.yaml delete mode 100644 docs/changelog/112230.yaml delete mode 100644 docs/changelog/112242.yaml delete mode 100644 docs/changelog/112260.yaml diff --git a/docs/changelog/111285.yaml b/docs/changelog/111285.yaml deleted file mode 100644 index e4856482b4d6e..0000000000000 --- a/docs/changelog/111285.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111285 -summary: "[Bugfix] Add `accessDeclaredMembers` permission to allow search application templates to parse floats" -area: Relevance -type: bug -issues: [] diff --git a/docs/changelog/111475.yaml b/docs/changelog/111475.yaml deleted file mode 100644 index 264c975444868..0000000000000 
--- a/docs/changelog/111475.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111475 -summary: "ESQL: Fix for overzealous validation in case of invalid mapped fields" -area: ES|QL -type: bug -issues: - - 111452 diff --git a/docs/changelog/111673.yaml b/docs/changelog/111673.yaml deleted file mode 100644 index ebc211633fcab..0000000000000 --- a/docs/changelog/111673.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111673 -summary: Properly handle filters on `TextSimilarityRank` retriever -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/111729.yaml b/docs/changelog/111729.yaml deleted file mode 100644 index c75c14a997da9..0000000000000 --- a/docs/changelog/111729.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111729 -summary: Speed up dense/sparse vector stats -area: Vector Search -type: bug -issues: - - 111715 diff --git a/docs/changelog/111756.yaml b/docs/changelog/111756.yaml deleted file mode 100644 index e58345dbe696a..0000000000000 --- a/docs/changelog/111756.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111756 -summary: Fix `NullPointerException` when doing knn search on empty index without dims -area: Vector Search -type: bug -issues: - - 111733 diff --git a/docs/changelog/111758.yaml b/docs/changelog/111758.yaml deleted file mode 100644 index c95cdf48bc8a7..0000000000000 --- a/docs/changelog/111758.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111758 -summary: Revert "Avoid bucket copies in Aggs" -area: Aggregations -type: bug -issues: - - 111679 diff --git a/docs/changelog/111807.yaml b/docs/changelog/111807.yaml deleted file mode 100644 index 97c5e58461c34..0000000000000 --- a/docs/changelog/111807.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111807 -summary: Explain Function Score Query -area: Search -type: bug -issues: [] diff --git a/docs/changelog/111843.yaml b/docs/changelog/111843.yaml deleted file mode 100644 index c8b20036520f3..0000000000000 --- a/docs/changelog/111843.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111843 -summary: Add maximum nested depth check to WKT parser 
-area: Geo -type: bug -issues: [] diff --git a/docs/changelog/111863.yaml b/docs/changelog/111863.yaml deleted file mode 100644 index 1724cd83f984b..0000000000000 --- a/docs/changelog/111863.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111863 -summary: Fixing incorrect bulk request took time -area: Ingest Node -type: bug -issues: - - 111854 diff --git a/docs/changelog/111866.yaml b/docs/changelog/111866.yaml deleted file mode 100644 index 34bf56da4dc9e..0000000000000 --- a/docs/changelog/111866.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111866 -summary: Fix windows memory locking -area: Infra/Core -type: bug -issues: - - 111847 diff --git a/docs/changelog/111943.yaml b/docs/changelog/111943.yaml deleted file mode 100644 index 6b9f03ccee31c..0000000000000 --- a/docs/changelog/111943.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111943 -summary: Fix synthetic source for empty nested objects -area: Mapping -type: bug -issues: - - 111811 diff --git a/docs/changelog/111947.yaml b/docs/changelog/111947.yaml deleted file mode 100644 index 0aff0b9c7b8be..0000000000000 --- a/docs/changelog/111947.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111947 -summary: Improve performance of grok pattern cycle detection -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/111966.yaml b/docs/changelog/111966.yaml deleted file mode 100644 index facf0a61c4d8a..0000000000000 --- a/docs/changelog/111966.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111966 -summary: No error when `store_array_source` is used without synthetic source -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/111983.yaml b/docs/changelog/111983.yaml deleted file mode 100644 index d5043d0b44155..0000000000000 --- a/docs/changelog/111983.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111983 -summary: Avoid losing error message in failure collector -area: ES|QL -type: bug -issues: - - 111894 diff --git a/docs/changelog/111994.yaml b/docs/changelog/111994.yaml deleted file mode 100644 index ee62651c43987..0000000000000 
--- a/docs/changelog/111994.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111994 -summary: Merge multiple ignored source entires for the same field -area: Logs -type: bug -issues: - - 111694 diff --git a/docs/changelog/112005.yaml b/docs/changelog/112005.yaml deleted file mode 100644 index 2d84381e632b3..0000000000000 --- a/docs/changelog/112005.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112005 -summary: Check for valid `parentDoc` before retrieving its previous -area: Mapping -type: bug -issues: - - 111990 diff --git a/docs/changelog/112038.yaml b/docs/changelog/112038.yaml deleted file mode 100644 index 6cbfb373b7420..0000000000000 --- a/docs/changelog/112038.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112038 -summary: Semantic reranking should fail whenever inference ID does not exist -area: Relevance -type: bug -issues: - - 111934 diff --git a/docs/changelog/112046.yaml b/docs/changelog/112046.yaml deleted file mode 100644 index f3cda1ed7a7d2..0000000000000 --- a/docs/changelog/112046.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112046 -summary: Fix calculation of parent offset for ignored source in some cases -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/112090.yaml b/docs/changelog/112090.yaml deleted file mode 100644 index 6d6e4d0851523..0000000000000 --- a/docs/changelog/112090.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112090 -summary: Always check `crsType` when folding spatial functions -area: Geo -type: bug -issues: - - 112089 diff --git a/docs/changelog/112135.yaml b/docs/changelog/112135.yaml deleted file mode 100644 index d2ff6994b6196..0000000000000 --- a/docs/changelog/112135.yaml +++ /dev/null @@ -1,4 +0,0 @@ -pr: 112135 -summary: Fix the bug where the run() function of ExecutableInferenceRequest throws an exception when get inferenceEntityId. 
-area: Inference -type: bug diff --git a/docs/changelog/112139.yaml b/docs/changelog/112139.yaml deleted file mode 100644 index d6d992ec1dcf2..0000000000000 --- a/docs/changelog/112139.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112139 -summary: Fix NPE when executing doc value queries over shape geometries with empty - segments -area: Geo -type: bug -issues: [] diff --git a/docs/changelog/112173.yaml b/docs/changelog/112173.yaml deleted file mode 100644 index 9a43b0d1bf1fa..0000000000000 --- a/docs/changelog/112173.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 112173 -summary: Prevent synthetic field loaders accessing stored fields from using stale - data -area: Mapping -type: bug -issues: - - 112156 diff --git a/docs/changelog/112178.yaml b/docs/changelog/112178.yaml deleted file mode 100644 index f1011291542b8..0000000000000 --- a/docs/changelog/112178.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112178 -summary: Avoid wrapping rejection exception in exchange -area: ES|QL -type: bug -issues: - - 112106 diff --git a/docs/changelog/112217.yaml b/docs/changelog/112217.yaml deleted file mode 100644 index bb367d6128001..0000000000000 --- a/docs/changelog/112217.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112217 -summary: Fix template alias parsing livelock -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/112226.yaml b/docs/changelog/112226.yaml deleted file mode 100644 index ac36c0c0fe4e2..0000000000000 --- a/docs/changelog/112226.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112226 -summary: "Fix \"unexpected field [remote_cluster]\" for CCS (RCS 1.0) when using API\ - \ key that references `remote_cluster`" -area: Security -type: bug -issues: [] diff --git a/docs/changelog/112230.yaml b/docs/changelog/112230.yaml deleted file mode 100644 index ef12dc3f78267..0000000000000 --- a/docs/changelog/112230.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112230 -summary: Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo -area: Security -type: bug -issues: [] diff 
--git a/docs/changelog/112242.yaml b/docs/changelog/112242.yaml deleted file mode 100644 index 7292a00166de2..0000000000000 --- a/docs/changelog/112242.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112242 -summary: Fix toReleaseVersion() when called on the current version id -area: Infra/Core -type: bug -issues: [111900] diff --git a/docs/changelog/112260.yaml b/docs/changelog/112260.yaml deleted file mode 100644 index 3f5642188a367..0000000000000 --- a/docs/changelog/112260.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112260 -summary: Fix DLS over Runtime Fields -area: "Authorization" -type: bug -issues: - - 111637 From c805f908897f6d772f01adf071d480f5ecbd88cb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 5 Sep 2024 20:33:27 +0200 Subject: [PATCH 076/115] Make DfsPhase a utility class (#112553) This thing has no state, no need to have an instance for it. --- .../main/java/org/elasticsearch/search/SearchService.java | 4 +--- .../main/java/org/elasticsearch/search/dfs/DfsPhase.java | 6 ++++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 6f70938a1e5e3..cff044a829bf0 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -278,8 +278,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final BigArrays bigArrays; - private final DfsPhase dfsPhase = new DfsPhase(); - private final FetchPhase fetchPhase; private final RankFeatureShardPhase rankFeatureShardPhase; private volatile boolean enableSearchWorkerThreads; @@ -511,7 +509,7 @@ private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchShardT Releasable ignored = readerContext.markAsUsed(getKeepAlive(request)); SearchContext context = createContext(readerContext, request, task, ResultsType.DFS, false) ) { - 
dfsPhase.execute(context); + DfsPhase.execute(context); return context.dfsResult(); } catch (Exception e) { logger.trace("Dfs phase failed", e); diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index 8c40a283844b4..ac150af50fbb0 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -52,7 +52,9 @@ */ public class DfsPhase { - public void execute(SearchContext context) { + private DfsPhase() {} + + public static void execute(SearchContext context) { try { collectStatistics(context); executeKnnVectorQuery(context); @@ -65,7 +67,7 @@ public void execute(SearchContext context) { } } - private void collectStatistics(SearchContext context) throws IOException { + private static void collectStatistics(SearchContext context) throws IOException { final DfsProfiler profiler = context.getProfilers() == null ? null : context.getProfilers().getDfsProfiler(); Map fieldStatistics = new HashMap<>(); From 96d63287c020546f760d79cbf4a7fa8fdca689d7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 6 Sep 2024 14:31:22 +1000 Subject: [PATCH 077/115] Mute org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT #111497 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 58d3060c90ad8..4c7adede9efc6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -170,6 +170,8 @@ tests: - class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests method: testClientServiceMutualAuthentication issue: https://github.com/elastic/elasticsearch/issues/112529 +- class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT + issue: 
https://github.com/elastic/elasticsearch/issues/111497 # Examples: # From f7639605a59e751b0f1303a165ac3a8f822958a7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:03:56 +1000 Subject: [PATCH 078/115] Mute org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} #112575 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4c7adede9efc6..97be7371c9bb0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -172,6 +172,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112529 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 +- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT + method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} + issue: https://github.com/elastic/elasticsearch/issues/112575 # Examples: # From 000ebaf7c290d69bf3b47902b2048905d899bb51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 6 Sep 2024 09:13:30 +0200 Subject: [PATCH 079/115] Json parsing exceptions should not cause 500 errors (#111548) Currently we wrap JsonEOFException from advancing the json parser into our own XContentEOFException, but this has the drawback that is results in 500 errors on the client side. Instead this should be 400 errors. This changes XContentEOFException to extend XContentParseException so we report a 400 error instead. 
Closes #111542 --- docs/changelog/111548.yaml | 6 ++++++ .../xcontent/provider/json/JsonXContentParser.java | 3 ++- .../org/elasticsearch/xcontent/XContentEOFException.java | 8 +++----- .../test/lang_mustache/50_multi_search_template.yml | 4 ++-- .../inference/external/response/XContentUtilsTests.java | 3 +-- .../huggingface/HuggingFaceElserResponseEntityTests.java | 4 ++-- .../process/autodetect/writer/XContentRecordReader.java | 5 ++--- 7 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 docs/changelog/111548.yaml diff --git a/docs/changelog/111548.yaml b/docs/changelog/111548.yaml new file mode 100644 index 0000000000000..ca9e5ae622894 --- /dev/null +++ b/docs/changelog/111548.yaml @@ -0,0 +1,6 @@ +pr: 111548 +summary: Json parsing exceptions should not cause 500 errors +area: Infra/Core +type: bug +issues: + - 111542 diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java index c8e429d4c1490..c59f003d9cb04 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java @@ -57,7 +57,8 @@ public Token nextToken() throws IOException { try { return convertToken(parser.nextToken()); } catch (JsonEOFException e) { - throw new XContentEOFException(e); + JsonLocation location = e.getLocation(); + throw new XContentEOFException(new XContentLocation(location.getLineNr(), location.getColumnNr()), "Unexpected end of file", e); } catch (JsonParseException e) { throw newXContentParseException(e); } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentEOFException.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentEOFException.java index de9ea6fb04f26..01a2407598159 100644 --- 
a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentEOFException.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentEOFException.java @@ -8,11 +8,9 @@ package org.elasticsearch.xcontent; -import java.io.IOException; +public class XContentEOFException extends XContentParseException { -public class XContentEOFException extends IOException { - - public XContentEOFException(IOException cause) { - super(cause); + public XContentEOFException(XContentLocation location, String message, Exception cause) { + super(location, message, cause); } } diff --git a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml index 109bc8888889f..de9b3a0ec9bc2 100644 --- a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml +++ b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml @@ -114,14 +114,14 @@ setup: - match: { responses.0.hits.total: 2 } - match: { responses.1.error.root_cause.0.type: x_content_e_o_f_exception } - - match: { responses.1.error.root_cause.0.reason: "/Unexpected.end.of.input/" } + - match: { responses.1.error.root_cause.0.reason: "/\\[1:22\\].Unexpected.end.of.file/" } - match: { responses.2.hits.total: 1 } - match: { responses.3.error.root_cause.0.type: parsing_exception } - match: { responses.3.error.root_cause.0.reason: "/unknown.query.\\[unknown\\]/" } - match: { responses.4.error.root_cause.0.type: illegal_argument_exception } - match: { responses.4.error.root_cause.0.reason: "[rest_total_hits_as_int] cannot be used if the tracking of total hits is not accurate, got 1" } - match: { responses.0.status: 200 } - - match: { responses.1.status: 500 } + - match: { responses.1.status: 400 } - match: { responses.2.status: 200 } - match: { 
responses.3.status: 400 } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java index 4ae860f394022..360936373a010 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java @@ -103,8 +103,7 @@ public void testPositionParserAtTokenAfterField_ThrowsWithMalformedJSON() throws XContentEOFException.class, () -> XContentUtils.positionParserAtTokenAfterField(parser, missingField, errorFormat) ); - - assertThat(exception.getMessage(), containsString("Unexpected end-of-input")); + assertThat(exception.getMessage(), containsString("[4:1] Unexpected end of file")); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java index e350a539ba928..e28d4f9608ae5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java @@ -309,8 +309,8 @@ public void testFails_ResponseIsInvalidJson_MissingSquareBracket() { new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) ) ); - - assertThat(thrownException.getMessage(), containsString("expected close marker for Array (start marker at")); + assertThat(thrownException.getMessage(), containsString("[5:1] Unexpected end of file")); + 
assertThat(thrownException.getCause().getMessage(), containsString("expected close marker for Array (start marker at")); } public void testFails_ResponseIsInvalidJson_MissingField() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/XContentRecordReader.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/XContentRecordReader.java index 93f043bb5878b..ff55a50e46541 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/XContentRecordReader.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/XContentRecordReader.java @@ -8,7 +8,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.xcontent.XContentEOFException; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; @@ -196,7 +195,7 @@ protected void initArrays(String[] record, boolean[] gotFields) { protected XContentParser.Token tryNextTokenOrReadToEndOnError() throws IOException { try { return parser.nextToken(); - } catch (XContentEOFException | XContentParseException e) { + } catch (XContentParseException e) { logger.warn("Attempting to recover from malformed JSON data.", e); for (int i = 0; i <= nestedLevel; ++i) { readToEndOfObject(); @@ -217,7 +216,7 @@ protected void readToEndOfObject() throws IOException { do { try { token = parser.nextToken(); - } catch (XContentEOFException | XContentParseException e) { + } catch (XContentParseException e) { ++errorCounter; if (errorCounter >= PARSE_ERRORS_LIMIT) { logger.error("Failed to recover from malformed JSON data.", e); From d2433c4707a487e6927a70846dbd2724a797e1ff Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 6 Sep 2024 17:44:42 +1000 Subject: [PATCH 080/115] Mute 
org.elasticsearch.script.mustache.LangMustacheClientYamlTestSuiteIT test {yaml=lang_mustache/50_multi_search_template/Multi-search template with errors} #112580 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 97be7371c9bb0..a8136219b3da2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -175,6 +175,9 @@ tests: - class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} issue: https://github.com/elastic/elasticsearch/issues/112575 +- class: org.elasticsearch.script.mustache.LangMustacheClientYamlTestSuiteIT + method: test {yaml=lang_mustache/50_multi_search_template/Multi-search template with errors} + issue: https://github.com/elastic/elasticsearch/issues/112580 # Examples: # From ee68d0cb03cf304c0d12399c84aac67d1afb2b6e Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Fri, 6 Sep 2024 11:01:10 +0200 Subject: [PATCH 081/115] Bring back operator and serverless request marking (#112554) Reverts https://github.com/elastic/elasticsearch/pull/111810 --- .../rest/RestGetDataStreamsAction.java | 25 ++++---- .../elasticsearch/rest/BaseRestHandler.java | 9 ++- .../elasticsearch/rest/RestController.java | 8 +++ .../org/elasticsearch/rest/RestRequest.java | 64 +++++++++++++++++-- .../org/elasticsearch/rest/RestUtils.java | 8 ++- .../elasticsearch/rest/ServerlessScope.java | 5 -- .../elasticsearch/rest/RestRequestTests.java | 35 +++++++--- .../elasticsearch/rest/RestUtilsTests.java | 18 +++--- .../operator/OperatorOnlyRegistry.java | 20 ++---- .../security/operator/OperatorPrivileges.java | 3 + .../RestGetBuiltinPrivilegesAction.java | 9 ++- .../rest/action/role/RestGetRolesAction.java | 14 +--- .../DefaultOperatorPrivilegesTests.java | 9 ++- 13 files changed, 151 insertions(+), 76 deletions(-) diff --git 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index c3fd479616319..29cda588bc26b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestUtils; @@ -61,17 +62,19 @@ public Set supportedCapabilities() { @Override public Set supportedQueryParameters() { - return Set.of( - "name", - "include_defaults", - "timeout", - "master_timeout", - RestRequest.PATH_RESTRICTED, - IndicesOptions.WildcardOptions.EXPAND_WILDCARDS, - IndicesOptions.ConcreteTargetOptions.IGNORE_UNAVAILABLE, - IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, - IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, - "verbose" + return Sets.union( + RestRequest.INTERNAL_MARKER_REQUEST_PARAMETERS, + Set.of( + "name", + "include_defaults", + "timeout", + "master_timeout", + IndicesOptions.WildcardOptions.EXPAND_WILDCARDS, + IndicesOptions.ConcreteTargetOptions.IGNORE_UNAVAILABLE, + IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, + IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, + "verbose" + ) ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index a17bc885f6b65..6a45d1e5dc43e 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -83,7 +83,14 @@ 
public final void handleRequest(RestRequest request, RestChannel channel, NodeCl // check if the query has any parameters that are not in the supported set (if declared) Set supported = allSupportedParameters(); if (supported != null) { - var allSupported = Sets.union(RestResponse.RESPONSE_PARAMS, ALWAYS_SUPPORTED, supported); + var allSupported = Sets.union( + RestResponse.RESPONSE_PARAMS, + ALWAYS_SUPPORTED, + // these internal parameters cannot be set by end-users, but are used by Elasticsearch internally. + // they must be accepted by all handlers + RestRequest.INTERNAL_MARKER_REQUEST_PARAMETERS, + supported + ); if (allSupported.containsAll(request.params().keySet()) == false) { Set unsupported = Sets.difference(request.params().keySet(), allSupported); throw new IllegalArgumentException(unrecognized(request, unsupported, allSupported, "parameter")); diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 8592888d2dd03..8e9cbd686110b 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -480,6 +480,14 @@ private void dispatchRequest( } else { threadContext.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.TRUE.toString()); } + + if (apiProtections.isEnabled()) { + // API protections are only enabled in serverless; therefore we can use this as an indicator to mark the + // request as a serverless mode request here, so downstream handlers can use the marker + request.markAsServerlessRequest(); + logger.trace("Marked request for uri [{}] as serverless request", request.uri()); + } + final var finalChannel = responseChannel; this.interceptor.intercept(request, responseChannel, handler.getConcreteRestHandler(), new ActionListener<>() { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java 
b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index 66ba0c743813e..96f2c2d10dc96 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -48,7 +48,31 @@ public class RestRequest implements ToXContent.Params, Traceable { - public static final String PATH_RESTRICTED = "pathRestricted"; + /** + * Internal marker request parameter to indicate that a request was made in serverless mode. Use this parameter, together with + * {@link #OPERATOR_REQUEST} if you need to toggle behavior for serverless, for example to enforce partial API restrictions + * (prevent request fields, omit response fields) for an API. + * Requests not made in serverless mode, will *not* have this parameter set. + * Given a request instance, you can use {@link #isServerlessRequest()} to determine if the parameter is set or not. + * This is also available from {@code ToXContent.Params}. For example: + * {@code params.paramAsBoolean(RestRequest.SERVERLESS_REQUEST, false)} + */ + public static final String SERVERLESS_REQUEST = "serverlessRequest"; + /** + * Internal marker request parameter to indicate that a request was made by an operator user. + * Requests made by regular users (users without operator privileges), will *not* have this parameter set. + * Given a request instance, you can use {@link #isOperatorRequest()} to determine if the parameter is set or not. + * This is also available from {@code ToXContent.Params}. For example: + * {@code params.paramAsBoolean(RestRequest.OPERATOR_REQUEST, false)} + */ + public static final String OPERATOR_REQUEST = "operatorRequest"; + + /** + * Internal request parameters used as markers to indicate various operations modes such as serverless mode, or operator mode. + * These can never be set directly by end-users. Instead, they are set internally by Elasticsearch and must be supported by all + * request handlers. 
+ */ + public static final Set INTERNAL_MARKER_REQUEST_PARAMETERS = Set.of(SERVERLESS_REQUEST, OPERATOR_REQUEST); // tchar pattern as defined by RFC7230 section 3.2.6 private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+"); @@ -616,13 +640,41 @@ public boolean hasExplicitRestApiVersion() { return restApiVersion.isPresent(); } - public void markPathRestricted(String restriction) { - if (params.containsKey(PATH_RESTRICTED)) { - throw new IllegalArgumentException("The parameter [" + PATH_RESTRICTED + "] is already defined."); + /** + * See {@link #SERVERLESS_REQUEST} + */ + public void markAsServerlessRequest() { + setParamTrueOnceAndConsume(SERVERLESS_REQUEST); + } + + /** + * See {@link #SERVERLESS_REQUEST} + */ + public boolean isServerlessRequest() { + return paramAsBoolean(SERVERLESS_REQUEST, false); + } + + /** + * See {@link #OPERATOR_REQUEST} + */ + public void markAsOperatorRequest() { + setParamTrueOnceAndConsume(OPERATOR_REQUEST); + } + + /** + * See {@link #OPERATOR_REQUEST} + */ + public boolean isOperatorRequest() { + return paramAsBoolean(OPERATOR_REQUEST, false); + } + + private void setParamTrueOnceAndConsume(String param) { + if (params.containsKey(param)) { + throw new IllegalArgumentException("The parameter [" + param + "] is already defined."); } - params.put(PATH_RESTRICTED, restriction); + params.put(param, "true"); // this parameter is intended be consumed via ToXContent.Params.param(..), not this.params(..) 
so don't require it is consumed here - consumedParams.add(PATH_RESTRICTED); + consumedParams.add(param); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/RestUtils.java b/server/src/main/java/org/elasticsearch/rest/RestUtils.java index 0e7200fa83b1c..681f4c33eb77c 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestUtils.java +++ b/server/src/main/java/org/elasticsearch/rest/RestUtils.java @@ -23,7 +23,7 @@ import java.util.regex.Pattern; import static org.elasticsearch.action.support.master.AcknowledgedRequest.DEFAULT_ACK_TIMEOUT; -import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; +import static org.elasticsearch.rest.RestRequest.INTERNAL_MARKER_REQUEST_PARAMETERS; public class RestUtils { @@ -85,8 +85,10 @@ private static String decodeQueryStringParam(final String s) { } private static void addParam(Map params, String name, String value) { - if (PATH_RESTRICTED.equalsIgnoreCase(name)) { - throw new IllegalArgumentException("parameter [" + PATH_RESTRICTED + "] is reserved and may not set"); + for (var reservedParameter : INTERNAL_MARKER_REQUEST_PARAMETERS) { + if (reservedParameter.equalsIgnoreCase(name)) { + throw new IllegalArgumentException("parameter [" + name + "] is reserved and may not be set"); + } } params.put(name, value); } diff --git a/server/src/main/java/org/elasticsearch/rest/ServerlessScope.java b/server/src/main/java/org/elasticsearch/rest/ServerlessScope.java index 34aa04c5e484b..8a078db7dc012 100644 --- a/server/src/main/java/org/elasticsearch/rest/ServerlessScope.java +++ b/server/src/main/java/org/elasticsearch/rest/ServerlessScope.java @@ -22,9 +22,4 @@ @Target(ElementType.TYPE) public @interface ServerlessScope { Scope value(); - - /** - * A value used when restricting a response of a serverless endpoints. 
- */ - String SERVERLESS_RESTRICTION = "serverless"; } diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index bb06dbe5d09aa..ae88215f951de 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -31,7 +31,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; -import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; +import static org.elasticsearch.rest.RestRequest.OPERATOR_REQUEST; +import static org.elasticsearch.rest.RestRequest.SERVERLESS_REQUEST; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -249,16 +250,30 @@ public void testRequiredContent() { assertEquals("unknown content type", e.getMessage()); } - public void testMarkPathRestricted() { + public void testIsServerlessRequest() { RestRequest request1 = contentRestRequest("content", new HashMap<>()); - request1.markPathRestricted("foo"); - assertEquals(request1.param(PATH_RESTRICTED), "foo"); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> request1.markPathRestricted("foo")); - assertThat(exception.getMessage(), is("The parameter [" + PATH_RESTRICTED + "] is already defined.")); - - RestRequest request2 = contentRestRequest("content", Map.of(PATH_RESTRICTED, "foo")); - exception = expectThrows(IllegalArgumentException.class, () -> request2.markPathRestricted("bar")); - assertThat(exception.getMessage(), is("The parameter [" + PATH_RESTRICTED + "] is already defined.")); + request1.markAsServerlessRequest(); + assertEquals(request1.param(SERVERLESS_REQUEST), "true"); + assertTrue(request1.isServerlessRequest()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, request1::markAsServerlessRequest); + 
assertThat(exception.getMessage(), is("The parameter [" + SERVERLESS_REQUEST + "] is already defined.")); + + RestRequest request2 = contentRestRequest("content", Map.of(SERVERLESS_REQUEST, "true")); + exception = expectThrows(IllegalArgumentException.class, request2::markAsServerlessRequest); + assertThat(exception.getMessage(), is("The parameter [" + SERVERLESS_REQUEST + "] is already defined.")); + } + + public void testIsOperatorRequest() { + RestRequest request1 = contentRestRequest("content", new HashMap<>()); + request1.markAsOperatorRequest(); + assertEquals(request1.param(OPERATOR_REQUEST), "true"); + assertTrue(request1.isOperatorRequest()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, request1::markAsOperatorRequest); + assertThat(exception.getMessage(), is("The parameter [" + OPERATOR_REQUEST + "] is already defined.")); + + RestRequest request2 = contentRestRequest("content", Map.of(OPERATOR_REQUEST, "true")); + exception = expectThrows(IllegalArgumentException.class, request2::markAsOperatorRequest); + assertThat(exception.getMessage(), is("The parameter [" + OPERATOR_REQUEST + "] is already defined.")); } public static RestRequest contentRestRequest(String content, Map params) { diff --git a/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java b/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java index 3226ca2bf51d2..24d40fd1b95fd 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java @@ -18,7 +18,7 @@ import java.util.Map; import java.util.regex.Pattern; -import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; +import static org.elasticsearch.rest.RestRequest.INTERNAL_MARKER_REQUEST_PARAMETERS; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -160,13 +160,15 @@ public void testCrazyURL() { } public void 
testReservedParameters() { - Map params = new HashMap<>(); - String uri = "something?" + PATH_RESTRICTED + "=value"; - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params) - ); - assertEquals(exception.getMessage(), "parameter [" + PATH_RESTRICTED + "] is reserved and may not set"); + for (var reservedParam : INTERNAL_MARKER_REQUEST_PARAMETERS) { + Map params = new HashMap<>(); + String uri = "something?" + reservedParam + "=value"; + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params) + ); + assertEquals(exception.getMessage(), "parameter [" + reservedParam + "] is reserved and may not be set"); + } } private void assertCorsSettingRegexIsNull(String settingsValue) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorOnlyRegistry.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorOnlyRegistry.java index f0889f1c48c75..ef3070f0bd787 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorOnlyRegistry.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorOnlyRegistry.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.security.operator; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.TransportRequest; @@ -23,21 +22,10 @@ public interface OperatorOnlyRegistry { OperatorPrivilegesViolation check(String action, TransportRequest request); /** - * Checks to see if a given {@link RestHandler} is subject to operator-only restrictions for the REST API. - * - * Any REST API may be fully or partially restricted. 
- * A fully restricted REST API mandates that the implementation of this method throw an - * {@link org.elasticsearch.ElasticsearchStatusException} with an appropriate status code and error message. - * - * A partially restricted REST API mandates that the {@link RestRequest} is marked as restricted so that the downstream handler can - * behave appropriately. - * For example, to restrict the REST response the implementation - * should call {@link RestRequest#markPathRestricted(String)} so that the downstream handler can properly restrict the response - * before returning to the client. Note - a partial restriction should not throw an exception. - * - * @param restHandler The {@link RestHandler} to check for any restrictions - * @param restRequest The {@link RestRequest} to check for any restrictions and mark any partially restricted REST API's - * @throws ElasticsearchStatusException if the request should be denied in its entirety (fully restricted) + * This method is only called if the user is not an operator. + * Implementations should fail the request if the {@link RestRequest} is not allowed to proceed by throwing an + * {@link org.elasticsearch.ElasticsearchException}. If the request should be handled by the associated {@link RestHandler}, + * then this implementations should do nothing. 
*/ void checkRest(RestHandler restHandler, RestRequest restRequest) throws ElasticsearchException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java index 79c529eb3d7b1..9ef41fad12401 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java @@ -182,6 +182,9 @@ public boolean checkRest(RestHandler restHandler, RestRequest restRequest, RestC ); throw e; } + } else { + restRequest.markAsOperatorRequest(); + logger.trace("Marked request for uri [{}] as operator request", restRequest.uri()); } return true; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java index 5f0657079e694..e0ef46dc73a18 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java @@ -85,9 +85,9 @@ public RestResponse buildResponse(GetBuiltinPrivilegesResponse response, XConten @Override protected Exception innerCheckFeatureAvailable(RestRequest request) { - final boolean restrictPath = request.hasParam(RestRequest.PATH_RESTRICTED); - assert false == restrictPath || DiscoveryNode.isStateless(settings); - if (false == restrictPath) { + final boolean shouldRestrictForServerless = shouldRestrictForServerless(request); + assert false == shouldRestrictForServerless || DiscoveryNode.isStateless(settings); + if (false == 
shouldRestrictForServerless) { return super.innerCheckFeatureAvailable(request); } // This is a temporary hack: we are re-using the native roles setting as an overall feature flag for custom roles. @@ -106,4 +106,7 @@ protected Exception innerCheckFeatureAvailable(RestRequest request) { } } + private boolean shouldRestrictForServerless(RestRequest request) { + return request.isServerlessRequest() && false == request.isOperatorRequest(); + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java index 232d74d16725d..dc9ecbbc63a8d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.rest.action.role; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.RestApiVersion; @@ -54,9 +53,9 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] roles = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); - final boolean restrictRequest = isPathRestricted(request); + final boolean restrictToNativeRolesOnly = request.isServerlessRequest() && false == request.isOperatorRequest(); return channel -> new GetRolesRequestBuilder(client).names(roles) - .nativeOnly(restrictRequest) + .nativeOnly(restrictToNativeRolesOnly) .execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(GetRolesResponse response, XContentBuilder builder) throws Exception 
{ @@ -84,17 +83,10 @@ protected Exception innerCheckFeatureAvailable(RestRequest request) { // Note: For non-restricted requests this action handles both reserved roles and native // roles, and should still be available even if native role management is disabled. // For restricted requests it should only be available if native role management is enabled - final boolean restrictPath = isPathRestricted(request); - if (false == restrictPath) { + if (false == request.isServerlessRequest() || request.isOperatorRequest()) { return null; } else { return super.innerCheckFeatureAvailable(request); } } - - private boolean isPathRestricted(RestRequest request) { - final boolean restrictRequest = request.hasParam(RestRequest.PATH_RESTRICTED); - assert false == restrictRequest || DiscoveryNode.isStateless(settings); - return restrictRequest; - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java index aa95ea097413c..a5dabe8c2dd82 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java @@ -89,7 +89,7 @@ public void testWillNotCheckWhenLicenseDoesNotSupport() { verifyNoMoreInteractions(operatorOnlyRegistry); } - public void testMarkOperatorUser() throws IllegalAccessException { + public void testMarkOperatorUser() { final Settings settings = Settings.builder().put("xpack.security.operator_privileges.enabled", true).build(); when(xPackLicenseState.isAllowed(Security.OPERATOR_PRIVILEGES_FEATURE)).thenReturn(true); final User operatorUser = new User("operator_user"); @@ -204,7 +204,7 @@ public void testCheckWillPassForInternalUsersBecauseTheyHaveOperatorPrivileges() verify(operatorOnlyRegistry, 
never()).check(anyString(), any()); } - public void testMaybeInterceptRequest() throws IllegalAccessException { + public void testMaybeInterceptRequest() { final boolean licensed = randomBoolean(); when(xPackLicenseState.isAllowed(Security.OPERATOR_PRIVILEGES_FEATURE)).thenReturn(licensed); @@ -279,11 +279,16 @@ public void testCheckRest() { ); assertThat(ex, instanceOf(ElasticsearchSecurityException.class)); assertThat(ex, throwableWithMessage("violation!")); + verify(restRequest, never()).markAsOperatorRequest(); Mockito.clearInvocations(operatorOnlyRegistry); + Mockito.clearInvocations(restRequest); // is an operator threadContext.putHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY, AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR); verifyNoInteractions(operatorOnlyRegistry); assertTrue(operatorPrivilegesService.checkRest(restHandler, restRequest, restChannel, threadContext)); + verify(restRequest, times(1)).markAsOperatorRequest(); + Mockito.clearInvocations(operatorOnlyRegistry); + Mockito.clearInvocations(restRequest); } } From b2bc8c598c8ed354b18225c96eb2f4fc29044615 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Fri, 6 Sep 2024 11:28:18 +0200 Subject: [PATCH 082/115] Unsupported locale in Kerb test (#112582) More like: https://github.com/elastic/elasticsearch/pull/112555 Closes: https://github.com/elastic/elasticsearch/issues/112529 --- muted-tests.yml | 3 --- .../xpack/security/authc/kerberos/KerberosTestCase.java | 3 ++- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index a8136219b3da2..5ace8dfc7c2a8 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -167,9 +167,6 @@ tests: - class: org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests method: testLoopOneAtATime issue: https://github.com/elastic/elasticsearch/issues/112471 -- class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests - method: 
testClientServiceMutualAuthentication - issue: https://github.com/elastic/elasticsearch/issues/112529 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 - class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index cd17723c2635c..229b6e2a8f92d 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -87,7 +87,8 @@ public abstract class KerberosTestCase extends ESTestCase { "mni", "sat", "sa", - "bgc" + "bgc", + "raj" ); @BeforeClass From 466c85b4136be5e8da4cf7a05dd684a56f92bdb1 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Fri, 6 Sep 2024 12:09:37 +0200 Subject: [PATCH 083/115] SQL: Fix UCASE/LCASE tests (#112585) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tests for upper/lower case functions (`UCASE()`/`LCASE()`) are not completely deterministic, due to Locale randomization. 
In particular, when using Turkish locale all the UCASE(`i`) will result in `İ` instead of `I` This change makes two tests more deterministic by including only values that don't include a `i` Fixes https://github.com/elastic/elasticsearch/issues/112535 --- .../sql/qa/server/src/main/resources/case-functions.sql-spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/case-functions.sql-spec b/x-pack/plugin/sql/qa/server/src/main/resources/case-functions.sql-spec index 2a856c6d08400..d7de6d8a4a173 100644 --- a/x-pack/plugin/sql/qa/server/src/main/resources/case-functions.sql-spec +++ b/x-pack/plugin/sql/qa/server/src/main/resources/case-functions.sql-spec @@ -21,7 +21,7 @@ ucaseInline3 SELECT UCASE(' elastic ') upper; multipleGroupingsAndOrderingByGroupsWithFunctions_1 -SELECT first_name f, last_name l, gender g, CONCAT(UCASE(first_name), LCASE(last_name)) c FROM test_emp GROUP BY gender, l, f, c ORDER BY c DESC, first_name, l ASC, g; +SELECT first_name f, last_name l, gender g, CONCAT(UCASE(first_name), LCASE(last_name)) c FROM test_emp WHERE emp_no IN (10003, 10009, 10023, 10024, 10025, 10029, 10030, 10033, 10034, 10035) GROUP BY gender, l, f, c ORDER BY c DESC, first_name, l ASC, g; multipleGroupingsAndOrderingByGroupsWithFunctions_2 -SELECT first_name f, last_name l, LCASE(gender) g, CONCAT(UCASE(first_name), LCASE(last_name)) c FROM test_emp GROUP BY f, LCASE(gender), l, c ORDER BY c DESC, first_name, l ASC, g; +SELECT first_name f, last_name l, LCASE(gender) g, CONCAT(UCASE(first_name), LCASE(last_name)) c FROM test_emp WHERE emp_no IN (10003, 10009, 10023, 10024, 10025, 10029, 10030, 10033, 10034, 10035) GROUP BY f, LCASE(gender), l, c ORDER BY c DESC, first_name, l ASC, g; From f8a7822a815ffe9dd49b02fdcccbff4112bd65b7 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Fri, 6 Sep 2024 12:22:42 +0200 Subject: [PATCH 084/115] Fix ES|QL 
hyperbolic functions docs (#112562) * Fix ES|QL hyperbolic functions docs * Make sin/cos/tan docs more consistent --- .../src/main/resources/meta.csv-spec | 28 +++++++++---------- .../expression/function/scalar/math/Cosh.java | 7 +++-- .../expression/function/scalar/math/Sin.java | 2 +- .../expression/function/scalar/math/Sinh.java | 6 ++-- .../expression/function/scalar/math/Tan.java | 2 +- .../expression/function/scalar/math/Tanh.java | 6 ++-- 6 files changed, 26 insertions(+), 25 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 325b984c36d34..5cb174f9777f5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -18,7 +18,7 @@ synopsis:keyword "boolean|cartesian_point|cartesian_shape|date|geo_point|geo_shape|integer|ip|keyword|long|text|version coalesce(first:boolean|cartesian_point|cartesian_shape|date|geo_point|geo_shape|integer|ip|keyword|long|text|version, ?rest...:boolean|cartesian_point|cartesian_shape|date|geo_point|geo_shape|integer|ip|keyword|long|text|version)" "keyword concat(string1:keyword|text, string2...:keyword|text)" "double cos(angle:double|integer|long|unsigned_long)" -"double cosh(angle:double|integer|long|unsigned_long)" +"double cosh(number:double|integer|long|unsigned_long)" "long count(?field:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" "long count_distinct(field:boolean|date|double|integer|ip|keyword|long|text|version, ?precision:integer|long|unsigned_long)" "integer date_diff(unit:keyword|text, startTimestamp:date, endTimestamp:date)" @@ -71,7 +71,7 @@ double pi() "keyword|text rtrim(string:keyword|text)" "double signum(number:double|integer|long|unsigned_long)" "double sin(angle:double|integer|long|unsigned_long)" -"double 
sinh(angle:double|integer|long|unsigned_long)" +"double sinh(number:double|integer|long|unsigned_long)" "keyword split(string:keyword|text, delim:keyword|text)" "double sqrt(number:double|integer|long|unsigned_long)" "geo_point|cartesian_point st_centroid_agg(field:geo_point|cartesian_point)" @@ -86,7 +86,7 @@ double pi() "keyword substring(string:keyword|text, start:integer, ?length:integer)" "long|double sum(number:double|integer|long)" "double tan(angle:double|integer|long|unsigned_long)" -"double tanh(angle:double|integer|long|unsigned_long)" +"double tanh(number:double|integer|long|unsigned_long)" double tau() "keyword to_base64(string:keyword|text)" "boolean to_bool(field:boolean|keyword|text|double|long|unsigned_long|integer)" @@ -142,7 +142,7 @@ cidr_match |[ip, blockX] |[ip, "keyword|text"] coalesce |first |"boolean|cartesian_point|cartesian_shape|date|geo_point|geo_shape|integer|ip|keyword|long|text|version" |Expression to evaluate. concat |[string1, string2] |["keyword|text", "keyword|text"] |[Strings to concatenate., Strings to concatenate.] cos |angle |"double|integer|long|unsigned_long" |An angle, in radians. If `null`, the function returns `null`. -cosh |angle |"double|integer|long|unsigned_long" |An angle, in radians. If `null`, the function returns `null`. +cosh |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. count |field |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" |Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows). count_distinct|[field, precision] |["boolean|date|double|integer|ip|keyword|long|text|version", "integer|long|unsigned_long"] |[Column or literal for which to count the number of distinct values., Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. 
The default value is 3000.] date_diff |[unit, startTimestamp, endTimestamp]|["keyword|text", date, date] |[Time difference unit, A string representing a start timestamp, A string representing an end timestamp] @@ -195,7 +195,7 @@ round |[number, decimals] |["double|integer|long|unsign rtrim |string |"keyword|text" |String expression. If `null`, the function returns `null`. signum |number |"double|integer|long|unsigned_long" |"Numeric expression. If `null`, the function returns `null`." sin |angle |"double|integer|long|unsigned_long" |An angle, in radians. If `null`, the function returns `null`. -sinh |angle |"double|integer|long|unsigned_long" |An angle, in radians. If `null`, the function returns `null`. +sinh |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. split |[string, delim] |["keyword|text", "keyword|text"] |[String expression. If `null`\, the function returns `null`., Delimiter. Only single byte delimiters are currently supported.] sqrt |number |"double|integer|long|unsigned_long" |"Numeric expression. If `null`, the function returns `null`." st_centroid_ag|field |"geo_point|cartesian_point" |[""] @@ -210,7 +210,7 @@ starts_with |[str, prefix] |["keyword|text", "keyword|te substring |[string, start, length] |["keyword|text", integer, integer] |[String expression. If `null`\, the function returns `null`., Start position., Length of the substring from the start position. Optional; if omitted\, all positions after `start` are returned.] sum |number |"double|integer|long" |[""] tan |angle |"double|integer|long|unsigned_long" |An angle, in radians. If `null`, the function returns `null`. -tanh |angle |"double|integer|long|unsigned_long" |An angle, in radians. If `null`, the function returns `null`. +tanh |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. tau |null |null |null to_base64 |string |"keyword|text" |A string. 
to_bool |field |"boolean|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. @@ -266,7 +266,7 @@ cidr_match |Returns true if the provided IP is contained in one of the provid coalesce |Returns the first of its arguments that is not null. If all arguments are null, it returns `null`. concat |Concatenates two or more strings. cos |Returns the {wikipedia}/Sine_and_cosine[cosine] of an angle. -cosh |Returns the {wikipedia}/Hyperbolic_functions[hyperbolic cosine] of an angle. +cosh |Returns the {wikipedia}/Hyperbolic_functions[hyperbolic cosine] of a number. count |Returns the total number (count) of input values. count_distinct|Returns the approximate number of distinct values. date_diff |Subtracts the `startTimestamp` from the `endTimestamp` and returns the difference in multiples of `unit`. If `startTimestamp` is later than the `endTimestamp`, negative values are returned. @@ -318,8 +318,8 @@ right |Return the substring that extracts 'length' chars from 'str' star round |Rounds a number to the specified number of decimal places. Defaults to 0, which returns the nearest integer. If the precision is a negative number, rounds to the number of digits left of the decimal point. rtrim |Removes trailing whitespaces from a string. signum |Returns the sign of the given number. It returns `-1` for negative numbers, `0` for `0` and `1` for positive numbers. -sin |Returns ths {wikipedia}/Sine_and_cosine[Sine] trigonometric function of an angle. -sinh |Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of an angle. +sin |Returns the {wikipedia}/Sine_and_cosine[sine] of an angle. +sinh |Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of a number. split |Split a single valued string into multiple strings. sqrt |Returns the square root of a number. The input can be any numeric value, the return value is always a double. Square roots of negative numbers and infinities are null. 
st_centroid_ag|Calculate the spatial centroid over a field with spatial point geometry type. @@ -333,8 +333,8 @@ st_y |Extracts the `y` coordinate from the supplied point. If the point starts_with |Returns a boolean that indicates whether a keyword string starts with another string. substring |Returns a substring of a string, specified by a start position and an optional length. sum |The sum of a numeric expression. -tan |Returns the {wikipedia}/Sine_and_cosine[Tangent] trigonometric function of an angle. -tanh |Returns the {wikipedia}/Hyperbolic_functions[Tangent] hyperbolic function of an angle. +tan |Returns the {wikipedia}/Sine_and_cosine[tangent] of an angle. +tanh |Returns the {wikipedia}/Hyperbolic_functions[hyperbolic tangent] of a number. tau |Returns the https://tauday.com/tau-manifesto[ratio] of a circle's circumference to its radius. to_base64 |Encode a string to a base64 string. to_bool |Converts an input value to a boolean value. A string value of *true* will be case-insensitive converted to the Boolean *true*. For anything else, including the empty string, the function will return *false*. The numerical value of *0* will be converted to *false*, anything else will be converted to *true*. @@ -499,9 +499,9 @@ META FUNCTIONS | WHERE STARTS_WITH(name, "sin") ; -name:keyword | synopsis:keyword |argNames:keyword | argTypes:keyword | argDescriptions:keyword | returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean | isAggregation:boolean -sin |"double sin(angle:double|integer|long|unsigned_long)" |angle |"double|integer|long|unsigned_long" | "An angle, in radians. If `null`, the function returns `null`." | double | "Returns ths {wikipedia}/Sine_and_cosine[Sine] trigonometric function of an angle." | false | false | false -sinh |"double sinh(angle:double|integer|long|unsigned_long)" |angle |"double|integer|long|unsigned_long" | "An angle, in radians. If `null`, the function returns `null`." 
| double | "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of an angle." | false | false | false +name:keyword | synopsis:keyword |argNames:keyword | argTypes:keyword | argDescriptions:keyword | returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean | isAggregation:boolean +sin |"double sin(angle:double|integer|long|unsigned_long)" |angle |"double|integer|long|unsigned_long" | "An angle, in radians. If `null`, the function returns `null`." | double | "Returns the {wikipedia}/Sine_and_cosine[sine] of an angle." | false | false | false +sinh |"double sinh(number:double|integer|long|unsigned_long)" |number |"double|integer|long|unsigned_long" | "Numeric expression. If `null`, the function returns `null`." | double | "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of a number." | false | false | false ; countFunctions#[skip:-8.15.99] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java index 0cfbc195186fe..721d9e9438a8d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java @@ -29,15 +29,16 @@ public class Cosh extends AbstractTrigonometricFunction { @FunctionInfo( returnType = "double", - description = "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic cosine] of an angle.", + description = "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic cosine] of a number.", examples = @Example(file = "floats", tag = "cosh") ) public Cosh( Source source, @Param( - name = "angle", + name = "number", type = { "double", "integer", "long", "unsigned_long" }, - description = "An angle, in radians. If `null`, the function returns `null`." + description = "Numeric expression. 
If `null`, the function returns `null`." + ) Expression angle ) { super(source, angle); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sin.java index 526b17fb3eb2e..b793d1972e004 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sin.java @@ -29,7 +29,7 @@ public class Sin extends AbstractTrigonometricFunction { @FunctionInfo( returnType = "double", - description = "Returns ths {wikipedia}/Sine_and_cosine[Sine] trigonometric function of an angle.", + description = "Returns the {wikipedia}/Sine_and_cosine[sine] of an angle.", examples = @Example(file = "floats", tag = "sin") ) public Sin( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java index f89e626955d7e..74c9bfe4826b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java @@ -29,15 +29,15 @@ public class Sinh extends AbstractTrigonometricFunction { @FunctionInfo( returnType = "double", - description = "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of an angle.", + description = "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of a number.", examples = @Example(file = "floats", tag = "sinh") ) public Sinh( Source source, @Param( - name = "angle", + name = "number", type = { "double", "integer", "long", "unsigned_long" }, - description = "An angle, in radians. If `null`, the function returns `null`." 
+ description = "Numeric expression. If `null`, the function returns `null`." ) Expression angle ) { super(source, angle); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java index 85cdba0db4682..44e61849c4496 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java @@ -29,7 +29,7 @@ public class Tan extends AbstractTrigonometricFunction { @FunctionInfo( returnType = "double", - description = "Returns the {wikipedia}/Sine_and_cosine[Tangent] trigonometric function of an angle.", + description = "Returns the {wikipedia}/Sine_and_cosine[tangent] of an angle.", examples = @Example(file = "floats", tag = "tan") ) public Tan( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java index 0cd4051968c79..bc203acc33a52 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java @@ -29,15 +29,15 @@ public class Tanh extends AbstractTrigonometricFunction { @FunctionInfo( returnType = "double", - description = "Returns the {wikipedia}/Hyperbolic_functions[Tangent] hyperbolic function of an angle.", + description = "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic tangent] of a number.", examples = @Example(file = "floats", tag = "tanh") ) public Tanh( Source source, @Param( - name = "angle", + name = "number", type = { "double", "integer", "long", "unsigned_long" }, - description = "An angle, in radians. 
If `null`, the function returns `null`." + description = "Numeric expression. If `null`, the function returns `null`." ) Expression angle ) { super(source, angle); From cd39fc8e551c56623c00e7e9c88cfdbbe75cfeba Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 6 Sep 2024 20:32:22 +1000 Subject: [PATCH 085/115] Mute org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests testClientServiceMutualAuthentication #112529 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5ace8dfc7c2a8..643bcca3d3e9c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -175,6 +175,9 @@ tests: - class: org.elasticsearch.script.mustache.LangMustacheClientYamlTestSuiteIT method: test {yaml=lang_mustache/50_multi_search_template/Multi-search template with errors} issue: https://github.com/elastic/elasticsearch/issues/112580 +- class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests + method: testClientServiceMutualAuthentication + issue: https://github.com/elastic/elasticsearch/issues/112529 # Examples: # From f3bc281978c2d529e6579914cded6f033f317a37 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 6 Sep 2024 14:16:17 +0300 Subject: [PATCH 086/115] Refactor build params for FieldMapper, adding SourceKeepMode (#112455) * Refactor build params for FieldMapper * more mappers and tests * more mappers * more mappers * spotless * spotless * stored by default * Revert "stored by default" This reverts commit bbd247d64ba87387e16bfdfb7a83145cf9e1e933. 
* restore storeIgnored * sync * list valid values for SourceKeepMode * small refactoring * spotless --- .../mapper/LegacyGeoShapeFieldMapper.java | 8 +- .../extras/MatchOnlyTextFieldMapper.java | 11 +-- .../mapper/extras/RankFeatureFieldMapper.java | 8 +- .../extras/RankFeaturesFieldMapper.java | 8 +- .../mapper/extras/ScaledFloatFieldMapper.java | 16 +-- .../extras/SearchAsYouTypeFieldMapper.java | 12 +-- .../mapper/extras/TokenCountFieldMapper.java | 12 +-- .../join/mapper/ParentIdFieldMapper.java | 2 +- .../join/mapper/ParentJoinFieldMapper.java | 2 +- .../percolator/PercolatorFieldMapper.java | 10 +- .../icu/ICUCollationKeywordFieldMapper.java | 15 +-- .../AnnotatedTextFieldMapper.java | 14 ++- .../mapper/murmur3/Murmur3FieldMapper.java | 7 +- .../mapper/AbstractGeometryFieldMapper.java | 21 +--- .../AbstractPointGeometryFieldMapper.java | 17 +--- .../index/mapper/AbstractScriptFieldType.java | 2 +- .../AbstractShapeGeometryFieldMapper.java | 20 +--- .../index/mapper/BinaryFieldMapper.java | 15 +-- .../index/mapper/BooleanFieldMapper.java | 34 ++++--- .../index/mapper/CompletionFieldMapper.java | 14 +-- .../index/mapper/CompositeRuntimeField.java | 2 +- .../index/mapper/DateFieldMapper.java | 32 ++++-- .../index/mapper/DocumentParser.java | 3 +- .../index/mapper/FieldMapper.java | 99 +++++++++++-------- .../index/mapper/GeoPointFieldMapper.java | 34 +++---- .../index/mapper/IpFieldMapper.java | 28 ++++-- .../index/mapper/KeywordFieldMapper.java | 17 ++-- .../elasticsearch/index/mapper/Mapper.java | 20 +++- .../index/mapper/MetadataFieldMapper.java | 2 +- .../index/mapper/NumberFieldMapper.java | 25 +++-- .../index/mapper/PlaceHolderFieldMapper.java | 7 +- .../index/mapper/RangeFieldMapper.java | 9 +- .../index/mapper/TextFieldMapper.java | 22 +++-- .../flattened/FlattenedFieldMapper.java | 6 +- .../vectors/DenseVectorFieldMapper.java | 19 ++-- .../vectors/SparseVectorFieldMapper.java | 7 +- ...appingLookupInferenceFieldMapperTests.java | 2 +- 
.../index/mapper/MappingLookupTests.java | 2 +- .../mapper/NonDynamicFieldMapperTests.java | 2 +- .../index/mapper/ObjectMapperMergeTests.java | 4 +- .../index/mapper/ParametrizedMapperTests.java | 34 +++++-- .../index/mapper/MockFieldMapper.java | 11 +-- .../index/mapper/NumberFieldMapperTests.java | 2 +- .../mapper/HistogramFieldMapper.java | 15 +-- .../mapper/SemanticTextFieldMapper.java | 6 +- .../AggregateDoubleMetricFieldMapper.java | 5 +- .../mapper/ConstantKeywordFieldMapper.java | 9 +- .../CountedKeywordFieldMapper.java | 8 +- .../unsignedlong/UnsignedLongFieldMapper.java | 16 +-- .../VersionStringFieldMapper.java | 20 +--- .../GeoShapeWithDocValuesFieldMapper.java | 43 ++------ .../index/mapper/PointFieldMapper.java | 8 +- .../index/mapper/ShapeFieldMapper.java | 8 +- .../wildcard/mapper/WildcardFieldMapper.java | 10 +- 54 files changed, 345 insertions(+), 440 deletions(-) diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 2808dae31239c..f41d365f305bd 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -352,7 +352,7 @@ private static int getLevels(int treeLevels, double precisionInMeters, int defau public LegacyGeoShapeFieldMapper build(MapperBuilderContext context) { LegacyGeoShapeParser parser = new LegacyGeoShapeParser(); GeoShapeFieldType ft = buildFieldType(parser, context); - return new LegacyGeoShapeFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new LegacyGeoShapeFieldMapper(leafName(), ft, builderParams(this, context), parser, this); } } @@ -537,20 +537,18 @@ public PrefixTreeStrategy resolvePrefixTreeStrategy(String strategyName) { public 
LegacyGeoShapeFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, LegacyGeoShapeParser parser, Builder builder ) { super( simpleName, mappedFieldType, + builderParams, builder.ignoreMalformed.get(), builder.coerce.get(), builder.ignoreZValue.get(), builder.orientation.get(), - multiFields, - copyTo, parser ); this.indexCreatedVersion = builder.indexCreatedVersion; diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index b3cd3586fca54..d6225674c7626 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -139,13 +139,11 @@ private MatchOnlyTextFieldType buildFieldType(MapperBuilderContext context) { @Override public MatchOnlyTextFieldMapper build(MapperBuilderContext context) { MatchOnlyTextFieldType tft = buildFieldType(context); - MultiFields multiFields = multiFieldsBuilder.build(this, context); return new MatchOnlyTextFieldMapper( leafName(), Defaults.FIELD_TYPE, tft, - multiFields, - copyTo, + builderParams(this, context), context.isSourceSynthetic(), this ); @@ -382,12 +380,11 @@ private MatchOnlyTextFieldMapper( String simpleName, FieldType fieldType, MatchOnlyTextFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean storeSource, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); assert mappedFieldType.getTextSearchInfo().isTokenized(); assert mappedFieldType.hasDocValues() == false; this.fieldType = freezeAndDeduplicateFieldType(fieldType); @@ -442,7 +439,7 @@ protected 
SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (copyTo.copyToFields().isEmpty() != true) { + if (copyTo().copyToFields().isEmpty() != true) { throw new IllegalArgumentException( "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index bd3845e1ee18a..0b475641e4290 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -98,8 +98,7 @@ public RankFeatureFieldMapper build(MapperBuilderContext context) { positiveScoreImpact.getValue(), nullValue.getValue() ), - multiFieldsBuilder.build(this, context), - copyTo, + builderParams(this, context), positiveScoreImpact.getValue(), nullValue.getValue() ); @@ -172,12 +171,11 @@ public Query termQuery(Object value, SearchExecutionContext context) { private RankFeatureFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean positiveScoreImpact, Float nullValue ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.positiveScoreImpact = positiveScoreImpact; this.nullValue = nullValue; } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java index e6cb3010f9960..5b1d35ec03c0e 100644 --- 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java @@ -66,8 +66,7 @@ public RankFeaturesFieldMapper build(MapperBuilderContext context) { return new RankFeaturesFieldMapper( leafName(), new RankFeaturesFieldType(context.buildFullName(leafName()), meta.getValue(), positiveScoreImpact.getValue()), - multiFieldsBuilder.build(this, context), - copyTo, + builderParams(this, context), positiveScoreImpact.getValue() ); } @@ -122,11 +121,10 @@ private static String indexedValueForSearch(Object value) { private RankFeaturesFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean positiveScoreImpact ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.positiveScoreImpact = positiveScoreImpact; } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index c346a7d669149..4e46105bd0534 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -197,14 +197,7 @@ public ScaledFloatFieldMapper build(MapperBuilderContext context) { metric.getValue(), indexMode ); - return new ScaledFloatFieldMapper( - leafName(), - type, - multiFieldsBuilder.build(this, context), - copyTo, - context.isSourceSynthetic(), - this - ); + return new ScaledFloatFieldMapper(leafName(), type, builderParams(this, context), context.isSourceSynthetic(), this); } } @@ -470,12 +463,11 @@ public String toString() { private ScaledFloatFieldMapper( String 
simpleName, ScaledFloatFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean isSourceSynthetic, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, builderParams); this.isSourceSynthetic = isSourceSynthetic; this.indexed = builder.indexed.getValue(); this.hasDocValues = builder.hasDocValues.getValue(); @@ -728,7 +720,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { + "] doesn't support synthetic source because it doesn't have doc values" ); } - if (copyTo.copyToFields().isEmpty() != true) { + if (copyTo().copyToFields().isEmpty() != true) { throw new IllegalArgumentException( "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java index d521f9b2d2a31..57ac8fdfbb023 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java @@ -262,11 +262,10 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { return new SearchAsYouTypeFieldMapper( leafName(), ft, - copyTo, + builderParams(this, context), indexAnalyzers, prefixFieldMapper, shingleFieldMappers, - multiFieldsBuilder.build(this, context), this ); } @@ -498,7 +497,7 @@ static final class PrefixFieldMapper extends FieldMapper { final FieldType fieldType; PrefixFieldMapper(FieldType fieldType, PrefixFieldType mappedFieldType) { - super(mappedFieldType.name(), mappedFieldType, MultiFields.empty(), CopyTo.empty()); + super(mappedFieldType.name(), mappedFieldType, BuilderParams.empty()); this.fieldType = 
Mapper.freezeAndDeduplicateFieldType(fieldType); } @@ -537,7 +536,7 @@ static final class ShingleFieldMapper extends FieldMapper { private final FieldType fieldType; ShingleFieldMapper(FieldType fieldType, ShingleFieldType mappedFieldtype) { - super(mappedFieldtype.name(), mappedFieldtype, MultiFields.empty(), CopyTo.empty()); + super(mappedFieldtype.name(), mappedFieldtype, BuilderParams.empty()); this.fieldType = freezeAndDeduplicateFieldType(fieldType); } @@ -672,14 +671,13 @@ public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRew public SearchAsYouTypeFieldMapper( String simpleName, SearchAsYouTypeFieldType mappedFieldType, - CopyTo copyTo, + BuilderParams builderParams, Map indexAnalyzers, PrefixFieldMapper prefixField, ShingleFieldMapper[] shingleFields, - MultiFields multiFields, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.prefixField = prefixField; this.shingleFields = shingleFields; this.maxShingleSize = builder.maxShingleSize.getValue(); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index 9db677ddddffa..fa0a96a548a97 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -87,7 +87,7 @@ public TokenCountFieldMapper build(MapperBuilderContext context) { nullValue.getValue(), meta.getValue() ); - return new TokenCountFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, this); + return new TokenCountFieldMapper(leafName(), ft, builderParams(this, context), this); } } @@ -135,14 +135,8 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) 
private final boolean enablePositionIncrements; private final Integer nullValue; - protected TokenCountFieldMapper( - String simpleName, - MappedFieldType defaultFieldType, - MultiFields multiFields, - CopyTo copyTo, - Builder builder - ) { - super(simpleName, defaultFieldType, multiFields, copyTo); + protected TokenCountFieldMapper(String simpleName, MappedFieldType defaultFieldType, BuilderParams builderParams, Builder builder) { + super(simpleName, defaultFieldType, builderParams); this.analyzer = builder.analyzer.getValue(); this.enablePositionIncrements = builder.enablePositionIncrements.getValue(); this.nullValue = builder.nullValue.getValue(); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java index 7e9b6916e99d4..f6392f32a88d6 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java @@ -88,7 +88,7 @@ public Object valueForDisplay(Object value) { } protected ParentIdFieldMapper(String name, boolean eagerGlobalOrdinals) { - super(name, new ParentIdFieldType(name, eagerGlobalOrdinals), MultiFields.empty(), CopyTo.empty(), false, null); + super(name, new ParentIdFieldType(name, eagerGlobalOrdinals), BuilderParams.empty()); } @Override diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index dc760c0b07b71..ccb67f5c51acf 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -210,7 +210,7 @@ protected ParentJoinFieldMapper( boolean eagerGlobalOrdinals, List relations ) { - super(simpleName, 
mappedFieldType, MultiFields.empty(), CopyTo.empty(), false, null); + super(simpleName, mappedFieldType, BuilderParams.empty()); this.parentIdFields = parentIdFields; this.eagerGlobalOrdinals = eagerGlobalOrdinals; this.relations = relations; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index ad936a5491b69..576ea4dbd5d23 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -137,8 +137,6 @@ protected Parameter[] getParameters() { @Override public PercolatorFieldMapper build(MapperBuilderContext context) { PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(leafName()), meta.getValue()); - // TODO should percolator even allow multifields? - MultiFields multiFields = multiFieldsBuilder.build(this, context); context = context.createChildContext(leafName(), null); KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder( EXTRACTED_TERMS_FIELD_NAME, @@ -165,8 +163,7 @@ public PercolatorFieldMapper build(MapperBuilderContext context) { return new PercolatorFieldMapper( leafName(), fieldType, - multiFields, - copyTo, + builderParams(this, context), searchExecutionContext, extractedTermsField, extractionResultField, @@ -375,8 +372,7 @@ static Tuple, Map>> extractTermsAndRanges(In PercolatorFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, Supplier searchExecutionContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper extractionResultField, @@ -387,7 +383,7 @@ static Tuple, Map>> extractTermsAndRanges(In IndexVersion indexCreatedVersion, Supplier clusterTransportVersion ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + 
super(simpleName, mappedFieldType, builderParams); this.searchExecutionContext = searchExecutionContext; this.queryTermsField = queryTermsField; this.extractionResultField = extractionResultField; diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java index 2d27447b618e9..7a0caf56d6066 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java @@ -336,15 +336,7 @@ public ICUCollationKeywordFieldMapper build(MapperBuilderContext context) { ignoreAbove.getValue(), meta.getValue() ); - return new ICUCollationKeywordFieldMapper( - leafName(), - buildFieldType(), - ft, - multiFieldsBuilder.build(this, context), - copyTo, - collator, - this - ); + return new ICUCollationKeywordFieldMapper(leafName(), buildFieldType(), ft, builderParams(this, context), collator, this); } } @@ -474,12 +466,11 @@ protected ICUCollationKeywordFieldMapper( String simpleName, FieldType fieldType, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, Collator collator, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); assert collator.isFrozen(); this.fieldType = freezeAndDeduplicateFieldType(fieldType); this.params = builder.collatorParams(); diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 8d50a9f7e29a9..4f077fdcde069 100644 --- 
a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -150,7 +150,6 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, MapperBuilder @Override public AnnotatedTextFieldMapper build(MapperBuilderContext context) { - MultiFields multiFields = multiFieldsBuilder.build(this, context); FieldType fieldType = TextParams.buildFieldType(() -> true, store, indexOptions, norms, termVectors); if (fieldType.indexOptions() == IndexOptions.NONE) { throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields must be indexed"); @@ -162,12 +161,12 @@ public AnnotatedTextFieldMapper build(MapperBuilderContext context) { ); } } + BuilderParams builderParams = builderParams(this, context); return new AnnotatedTextFieldMapper( leafName(), fieldType, - buildFieldType(fieldType, context, multiFields), - multiFields, - copyTo, + buildFieldType(fieldType, context, builderParams.multiFields()), + builderParams, this ); } @@ -523,11 +522,10 @@ protected AnnotatedTextFieldMapper( String simpleName, FieldType fieldType, AnnotatedTextFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, builderParams); assert fieldType.tokenized(); this.fieldType = freezeAndDeduplicateFieldType(fieldType); this.builder = builder; @@ -578,7 +576,7 @@ protected SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (copyTo.copyToFields().isEmpty() != true) { + if (copyTo().copyToFields().isEmpty() != true) { throw new IllegalArgumentException( "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); diff 
--git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 0b29bc9062917..979ca842ef346 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -57,8 +57,7 @@ public Murmur3FieldMapper build(MapperBuilderContext context) { return new Murmur3FieldMapper( leafName(), new Murmur3FieldType(context.buildFullName(leafName()), stored.getValue(), meta.getValue()), - multiFieldsBuilder.build(this, context), - copyTo + builderParams(this, context) ); } } @@ -94,8 +93,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { } } - protected Murmur3FieldMapper(String simpleName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo) { - super(simpleName, mappedFieldType, multiFields, copyTo); + protected Murmur3FieldMapper(String simpleName, MappedFieldType mappedFieldType, BuilderParams builderParams) { + super(simpleName, mappedFieldType, builderParams); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 39f4a3a82c5c4..1065e67dccf4b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -201,32 +201,17 @@ protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) { protected AbstractGeometryFieldMapper( String simpleName, MappedFieldType mappedFieldType, + BuilderParams builderParams, Explicit ignoreMalformed, Explicit ignoreZValue, - MultiFields multiFields, - CopyTo copyTo, Parser parser ) { - super(simpleName, 
mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.ignoreMalformed = ignoreMalformed; this.ignoreZValue = ignoreZValue; this.parser = parser; } - protected AbstractGeometryFieldMapper( - String simpleName, - MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, - Parser parser, - OnScriptError onScriptError - ) { - super(simpleName, mappedFieldType, multiFields, copyTo, true, onScriptError); - this.ignoreMalformed = Explicit.EXPLICIT_FALSE; - this.ignoreZValue = Explicit.EXPLICIT_FALSE; - this.parser = parser; - } - @Override @SuppressWarnings("unchecked") public AbstractGeometryFieldType fieldType() { @@ -252,7 +237,7 @@ protected boolean supportsParsingObject() { @Override public final void parse(DocumentParserContext context) throws IOException { - if (hasScript) { + if (builderParams.hasScript()) { throw new DocumentParsingException( context.parser().getTokenLocation(), "failed to parse field [" + fieldType().name() + "] of type + " + contentType() + "]", diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index 2b4ecc8f0a89d..23005289e729b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -40,29 +40,16 @@ public static Parameter nullValueParam( protected AbstractPointGeometryFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, + BuilderParams builderParams, Explicit ignoreMalformed, Explicit ignoreZValue, T nullValue, - CopyTo copyTo, Parser parser ) { - super(simpleName, mappedFieldType, ignoreMalformed, ignoreZValue, multiFields, copyTo, parser); + super(simpleName, mappedFieldType, builderParams, ignoreMalformed, ignoreZValue, parser); 
this.nullValue = nullValue; } - protected AbstractPointGeometryFieldMapper( - String simpleName, - MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, - Parser parser, - OnScriptError onScriptError - ) { - super(simpleName, mappedFieldType, multiFields, copyTo, parser, onScriptError); - this.nullValue = null; - } - public T getNullValue() { return nullValue; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java index cf453bd1571be..92cb1a0d85d8f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java @@ -244,7 +244,7 @@ protected abstract static class Builder extends RuntimeField.Builder { ).setSerializerCheck((id, ic, v) -> ic); private final FieldMapper.Parameter onScriptError = FieldMapper.Parameter.onScriptErrorParam( - m -> m.onScriptError, + m -> m.builderParams.onScriptError(), script ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java index 619c6c6613d59..ef20a40f6e9de 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -76,30 +76,14 @@ protected Object nullValueAsSource(T nullValue) { protected AbstractShapeGeometryFieldMapper( String simpleName, MappedFieldType mappedFieldType, + BuilderParams builderParams, Explicit ignoreMalformed, Explicit coerce, Explicit ignoreZValue, Explicit orientation, - MultiFields multiFields, - CopyTo copyTo, Parser parser ) { - super(simpleName, mappedFieldType, ignoreMalformed, ignoreZValue, multiFields, copyTo, parser); - this.coerce = coerce; - this.orientation = 
orientation; - } - - protected AbstractShapeGeometryFieldMapper( - String simpleName, - MappedFieldType mappedFieldType, - MultiFields multiFields, - Explicit coerce, - Explicit orientation, - CopyTo copyTo, - Parser parser, - OnScriptError onScriptError - ) { - super(simpleName, mappedFieldType, multiFields, copyTo, parser, onScriptError); + super(simpleName, mappedFieldType, builderParams, ignoreMalformed, ignoreZValue, parser); this.coerce = coerce; this.orientation = orientation; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 1891e19b6d036..99a04f5d08337 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -72,8 +72,7 @@ public BinaryFieldMapper build(MapperBuilderContext context) { return new BinaryFieldMapper( leafName(), new BinaryFieldType(context.buildFullName(leafName()), stored.getValue(), hasDocValues.getValue(), meta.getValue()), - multiFieldsBuilder.build(this, context), - copyTo, + builderParams(this, context), this ); } @@ -142,14 +141,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { private final boolean hasDocValues; private final boolean isSyntheticSourceEnabledViaIndexMode; - protected BinaryFieldMapper( - String simpleName, - MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, - Builder builder - ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + protected BinaryFieldMapper(String simpleName, MappedFieldType mappedFieldType, BuilderParams builderParams, Builder builder) { + super(simpleName, mappedFieldType, builderParams); this.stored = builder.stored.getValue(); this.hasDocValues = builder.hasDocValues.getValue(); this.isSyntheticSourceEnabledViaIndexMode = builder.isSyntheticSourceEnabledViaIndexMode; @@ -207,7 +200,7 @@ protected 
SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (copyTo.copyToFields().isEmpty() != true) { + if (copyTo().copyToFields().isEmpty() != true) { throw new IllegalArgumentException( "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 1f0088ec96478..59db5c35e40bb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -89,7 +89,10 @@ public static final class Builder extends FieldMapper.DimensionBuilder { ).acceptsNull(); private final Parameter