From b357936227df04f88c980b6cbf7cd7cde982a25b Mon Sep 17 00:00:00 2001 From: Dmitriy Burlutskiy Date: Mon, 16 Dec 2024 15:36:50 +0100 Subject: [PATCH 01/62] Revert "Support mTLS in Elastic Inference Service plugin (#116423)" (#118765) This reverts commit 74a4484101dd65a0194f4adc3bd23fe39c2f2bd7. --- docs/changelog/116423.yaml | 5 - .../xpack/core/ssl/SSLService.java | 2 - .../core/LocalStateCompositeXPackPlugin.java | 2 +- .../xpack/core/ssl/SSLServiceTests.java | 3 +- .../ShardBulkInferenceActionFilterIT.java | 3 +- .../integration/ModelRegistryIT.java | 4 +- .../inference/src/main/java/module-info.java | 1 - .../xpack/inference/InferencePlugin.java | 101 +++++------------- .../external/http/HttpClientManager.java | 44 -------- .../TextSimilarityRankRetrieverBuilder.java | 11 +- .../ElasticInferenceServiceSettings.java | 24 +---- .../SemanticTextClusterMetadataTests.java | 3 +- .../xpack/inference/InferencePluginTests.java | 65 ----------- .../inference/LocalStateInferencePlugin.java | 71 ------------ .../elasticsearch/xpack/inference/Utils.java | 15 +++ ...emanticTextNonDynamicFieldMapperTests.java | 3 +- .../TextSimilarityRankMultiNodeTests.java | 4 +- ...SimilarityRankRetrieverTelemetryTests.java | 5 +- .../TextSimilarityRankTests.java | 4 +- .../xpack/ml/LocalStateMachineLearning.java | 7 -- .../xpack/ml/support/BaseMlIntegTestCase.java | 4 +- .../security/CrossClusterShardTests.java | 2 + 22 files changed, 69 insertions(+), 314 deletions(-) delete mode 100644 docs/changelog/116423.yaml delete mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java delete mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java diff --git a/docs/changelog/116423.yaml b/docs/changelog/116423.yaml deleted file mode 100644 index d6d10eab410e4..0000000000000 --- a/docs/changelog/116423.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116423 -summary: Support mTLS for the Elastic Inference Service integration inside the inference API -area: Machine Learning -type: feature -issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index d0d5e463f9652..9704335776f11 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -596,8 +596,6 @@ static Map getSSLSettingsMap(Settings settings) { sslSettingsMap.put(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX, settings.getByPrefix(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX)); sslSettingsMap.put(XPackSettings.TRANSPORT_SSL_PREFIX, settings.getByPrefix(XPackSettings.TRANSPORT_SSL_PREFIX)); sslSettingsMap.putAll(getTransportProfileSSLSettings(settings)); - // Mount Elastic Inference Service (part of the Inference plugin) configuration - sslSettingsMap.put("xpack.inference.elastic.http.ssl", settings.getByPrefix("xpack.inference.elastic.http.ssl.")); // Only build remote cluster server SSL if the port is enabled if (REMOTE_CLUSTER_SERVER_ENABLED.get(settings)) { sslSettingsMap.put(XPackSettings.REMOTE_CLUSTER_SERVER_SSL_PREFIX, getRemoteClusterServerSslSettings(settings)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index d50f7bb27a5df..1f2c89c473a62 
100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -623,7 +623,7 @@ public Map getSnapshotCommitSup } @SuppressWarnings("unchecked") - protected List filterPlugins(Class type) { + private List filterPlugins(Class type) { return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T) p)).collect(Collectors.toList()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index bfac286bc3c35..9663e41a647a8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -614,8 +614,7 @@ public void testGetConfigurationByContextName() throws Exception { "xpack.security.authc.realms.ldap.realm1.ssl", "xpack.security.authc.realms.saml.realm2.ssl", "xpack.monitoring.exporters.mon1.ssl", - "xpack.monitoring.exporters.mon2.ssl", - "xpack.inference.elastic.http.ssl" }; + "xpack.monitoring.exporters.mon2.ssl" }; assumeTrue("Not enough cipher suites are available to support this test", getCipherSuites.length >= contextNames.length); diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java index c7b3a9d42f579..3b0fc869c8124 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; @@ -59,7 +58,7 @@ public void setup() throws Exception { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateInferencePlugin.class); + return Arrays.asList(Utils.TestInferencePlugin.class); } public void testBulkOperations() throws Exception { diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index d5c156d1d4f46..be6b3725b0f35 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -31,7 +31,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import 
org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalModel; @@ -76,7 +76,7 @@ public void createComponents() { @Override protected Collection> getPlugins() { - return pluginList(ReindexPlugin.class, LocalStateInferencePlugin.class); + return pluginList(ReindexPlugin.class, InferencePlugin.class); } public void testStoreModel() throws Exception { diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 1c2240e8c5217..53974657e4e23 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -34,7 +34,6 @@ requires software.amazon.awssdk.retries.api; requires org.reactivestreams; requires org.elasticsearch.logging; - requires org.elasticsearch.sslconfig; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 93743a5485c2c..ea92b7d98fe30 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -28,7 +28,6 @@ import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceRegistry; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.node.PluginComponentBinding; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; @@ -46,7 +45,6 @@ import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.inference.action.DeleteInferenceEndpointAction; import org.elasticsearch.xpack.core.inference.action.GetInferenceDiagnosticsAction; @@ -56,7 +54,6 @@ import org.elasticsearch.xpack.core.inference.action.PutInferenceModelAction; import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; import org.elasticsearch.xpack.core.inference.action.UpdateInferenceModelAction; -import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.inference.action.TransportDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceDiagnosticsAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceModelAction; @@ -121,6 +118,7 @@ import java.util.Map; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Collectors; import java.util.stream.Stream; import static java.util.Collections.singletonList; @@ -154,7 +152,6 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP private final Settings settings; private final SetOnce httpFactory = new SetOnce<>(); private final SetOnce amazonBedrockFactory = new SetOnce<>(); - private final SetOnce elasicInferenceServiceFactory = new SetOnce<>(); 
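The hunks below strip the mTLS-specific wiring out of InferencePlugin#createComponents and HttpClientManager: the reverted code built a dedicated connection pool whose https scheme used the `xpack.inference.elastic.http.ssl.*` SSL configuration. A minimal, self-contained sketch of that pool-with-custom-TLS pattern, using the same Apache HttpAsyncClient types the diff touches — the class name and the default SSLContext in `main` are illustrative assumptions; the reverted code derived its strategy from SSLService instead:

import javax.net.ssl.SSLContext;

import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager;
import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor;
import org.apache.http.impl.nio.reactor.IOReactorConfig;
import org.apache.http.nio.conn.NoopIOSessionStrategy;
import org.apache.http.nio.conn.SchemeIOSessionStrategy;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;

public final class SslPoolSketch {

    // Build a connection pool whose https connections use the supplied strategy while
    // plain http stays untouched, mirroring the removed createConnectionManager(SSLIOSessionStrategy).
    static PoolingNHttpClientConnectionManager sslPool(SSLIOSessionStrategy sslStrategy) throws Exception {
        var ioReactor = new DefaultConnectingIOReactor(IOReactorConfig.custom().setSoKeepAlive(true).build());
        Registry<SchemeIOSessionStrategy> registry = RegistryBuilder.<SchemeIOSessionStrategy>create()
            .register("http", NoopIOSessionStrategy.INSTANCE)
            .register("https", sslStrategy)
            .build();
        return new PoolingNHttpClientConnectionManager(ioReactor, registry);
    }

    public static void main(String[] args) throws Exception {
        // Illustrative only: the JVM default SSLContext stands in for the strategy the
        // reverted code obtained via SSLService from the xpack.inference.elastic.http.ssl.* settings.
        var pool = sslPool(new SSLIOSessionStrategy(SSLContext.getDefault()));
        System.out.println("max total connections: " + pool.getMaxTotal());
    }
}

Keeping a second, separately configured pool is what let Elastic Inference Service requests present a client certificate (mTLS) without touching the default inference HTTP client; the revert collapses everything back onto the single shared pool.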
private final SetOnce serviceComponents = new SetOnce<>(); private final SetOnce elasticInferenceServiceComponents = new SetOnce<>(); private final SetOnce inferenceServiceRegistry = new SetOnce<>(); @@ -237,31 +234,31 @@ public Collection createComponents(PluginServices services) { var inferenceServices = new ArrayList<>(inferenceServiceExtensions); inferenceServices.add(this::getInferenceServiceFactories); - if (isElasticInferenceServiceEnabled()) { - // Create a separate instance of HTTPClientManager with its own SSL configuration (`xpack.inference.elastic.http.ssl.*`). - var elasticInferenceServiceHttpClientManager = HttpClientManager.create( - settings, - services.threadPool(), - services.clusterService(), - throttlerManager, - getSslService() - ); + // Set elasticInferenceUrl based on feature flags to support transitioning to the new Elastic Inference Service URL without exposing + // internal names like "eis" or "gateway". + ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); + + String elasticInferenceUrl = null; - var elasticInferenceServiceRequestSenderFactory = new HttpRequestSender.Factory( - serviceComponents.get(), - elasticInferenceServiceHttpClientManager, - services.clusterService() + if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + elasticInferenceUrl = inferenceServiceSettings.getElasticInferenceServiceUrl(); + } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + log.warn( + "Deprecated flag {} detected for enabling {}. Please use {}.", + ELASTIC_INFERENCE_SERVICE_IDENTIFIER, + DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, + ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG ); - elasicInferenceServiceFactory.set(elasticInferenceServiceRequestSenderFactory); + elasticInferenceUrl = inferenceServiceSettings.getEisGatewayUrl(); + } - ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); - String elasticInferenceUrl = this.getElasticInferenceServiceUrl(inferenceServiceSettings); + if (elasticInferenceUrl != null) { elasticInferenceServiceComponents.set(new ElasticInferenceServiceComponents(elasticInferenceUrl)); inferenceServices.add( () -> List.of( context -> new ElasticInferenceService( - elasicInferenceServiceFactory.get(), + httpFactory.get(), serviceComponents.get(), elasticInferenceServiceComponents.get() ) @@ -384,21 +381,16 @@ public static ExecutorBuilder inferenceUtilityExecutor(Settings settings) { @Override public List> getSettings() { - ArrayList> settings = new ArrayList<>(); - settings.addAll(HttpSettings.getSettingsDefinitions()); - settings.addAll(HttpClientManager.getSettingsDefinitions()); - settings.addAll(ThrottlerManager.getSettingsDefinitions()); - settings.addAll(RetrySettings.getSettingsDefinitions()); - settings.addAll(Truncator.getSettingsDefinitions()); - settings.addAll(RequestExecutorServiceSettings.getSettingsDefinitions()); - settings.add(SKIP_VALIDATE_AND_START); - - // Register Elastic Inference Service settings definitions if the corresponding feature flag is enabled. 
- if (isElasticInferenceServiceEnabled()) { - settings.addAll(ElasticInferenceServiceSettings.getSettingsDefinitions()); - } - - return settings; + return Stream.of( + HttpSettings.getSettingsDefinitions(), + HttpClientManager.getSettingsDefinitions(), + ThrottlerManager.getSettingsDefinitions(), + RetrySettings.getSettingsDefinitions(), + ElasticInferenceServiceSettings.getSettingsDefinitions(), + Truncator.getSettingsDefinitions(), + RequestExecutorServiceSettings.getSettingsDefinitions(), + List.of(SKIP_VALIDATE_AND_START) + ).flatMap(Collection::stream).collect(Collectors.toList()); } @Override @@ -446,10 +438,7 @@ public List getQueryRewriteInterceptors() { @Override public List> getRetrievers() { return List.of( - new RetrieverSpec<>( - new ParseField(TextSimilarityRankBuilder.NAME), - (parser, context) -> TextSimilarityRankRetrieverBuilder.fromXContent(parser, context, getLicenseState()) - ), + new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent), new RetrieverSpec<>(new ParseField(RandomRankBuilder.NAME), RandomRankRetrieverBuilder::fromXContent) ); } @@ -458,36 +447,4 @@ public List> getRetrievers() { public Map getHighlighters() { return Map.of(SemanticTextHighlighter.NAME, new SemanticTextHighlighter()); } - - // Get Elastic Inference service URL based on feature flags to support transitioning - // to the new Elastic Inference Service URL. - private String getElasticInferenceServiceUrl(ElasticInferenceServiceSettings settings) { - String elasticInferenceUrl = null; - - if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - elasticInferenceUrl = settings.getElasticInferenceServiceUrl(); - } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - log.warn( - "Deprecated flag {} detected for enabling {}. 
Please use {}.", - ELASTIC_INFERENCE_SERVICE_IDENTIFIER, - DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, - ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG - ); - elasticInferenceUrl = settings.getEisGatewayUrl(); - } - - return elasticInferenceUrl; - } - - protected Boolean isElasticInferenceServiceEnabled() { - return (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() || DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()); - } - - protected SSLService getSslService() { - return XPackPlugin.getSharedSslService(); - } - - protected XPackLicenseState getLicenseState() { - return XPackPlugin.getSharedLicenseState(); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java index 6d09c9e67b363..e5d76b9bb5570 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java @@ -7,14 +7,9 @@ package org.elasticsearch.xpack.inference.external.http; -import org.apache.http.config.Registry; -import org.apache.http.config.RegistryBuilder; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.nio.conn.NoopIOSessionStrategy; -import org.apache.http.nio.conn.SchemeIOSessionStrategy; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.nio.reactor.ConnectingIOReactor; import org.apache.http.nio.reactor.IOReactorException; import org.apache.http.pool.PoolStats; @@ -26,7 +21,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import java.io.Closeable; @@ -34,13 +28,11 @@ import java.util.List; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX; public class HttpClientManager implements Closeable { private static final Logger logger = LogManager.getLogger(HttpClientManager.class); /** * The maximum number of total connections the connection pool can lease to all routes. - * The configuration applies to each instance of HTTPClientManager (max_total_connections=10 and instances=5 leads to 50 connections). * From googling around the connection pools maxTotal value should be close to the number of available threads. * * https://stackoverflow.com/questions/30989637/how-to-decide-optimal-settings-for-setmaxtotal-and-setdefaultmaxperroute @@ -55,7 +47,6 @@ public class HttpClientManager implements Closeable { /** * The max number of connections a single route can lease. - * This configuration applies to each instance of HttpClientManager. 
*/ public static final Setting MAX_ROUTE_CONNECTIONS = Setting.intSetting( "xpack.inference.http.max_route_connections", @@ -107,22 +98,6 @@ public static HttpClientManager create( return new HttpClientManager(settings, connectionManager, threadPool, clusterService, throttlerManager); } - public static HttpClientManager create( - Settings settings, - ThreadPool threadPool, - ClusterService clusterService, - ThrottlerManager throttlerManager, - SSLService sslService - ) { - // Set the sslStrategy to ensure an encrypted connection, as Elastic Inference Service requires it. - SSLIOSessionStrategy sslioSessionStrategy = sslService.sslIOSessionStrategy( - sslService.getSSLConfiguration(ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX) - ); - - PoolingNHttpClientConnectionManager connectionManager = createConnectionManager(sslioSessionStrategy); - return new HttpClientManager(settings, connectionManager, threadPool, clusterService, throttlerManager); - } - // Default for testing HttpClientManager( Settings settings, @@ -146,25 +121,6 @@ public static HttpClientManager create( this.addSettingsUpdateConsumers(clusterService); } - private static PoolingNHttpClientConnectionManager createConnectionManager(SSLIOSessionStrategy sslStrategy) { - ConnectingIOReactor ioReactor; - try { - var configBuilder = IOReactorConfig.custom().setSoKeepAlive(true); - ioReactor = new DefaultConnectingIOReactor(configBuilder.build()); - } catch (IOReactorException e) { - var message = "Failed to initialize HTTP client manager with SSL."; - logger.error(message, e); - throw new ElasticsearchException(message, e); - } - - Registry registry = RegistryBuilder.create() - .register("http", NoopIOSessionStrategy.INSTANCE) - .register("https", sslStrategy) - .build(); - - return new PoolingNHttpClientConnectionManager(ioReactor, registry); - } - private static PoolingNHttpClientConnectionManager createConnectionManager() { ConnectingIOReactor ioReactor; try { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index f54696895a818..fd2427dc8ac6a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; @@ -22,6 +21,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.List; @@ -73,11 +73,8 @@ public class TextSimilarityRankRetrieverBuilder extends CompoundRetrieverBuilder RetrieverBuilder.declareBaseParserFields(TextSimilarityRankBuilder.NAME, PARSER); } - public static TextSimilarityRankRetrieverBuilder fromXContent( - XContentParser parser, - RetrieverParserContext context, - XPackLicenseState licenceState 
- ) throws IOException { + public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) + throws IOException { if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED) == false) { throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); } @@ -86,7 +83,7 @@ public static TextSimilarityRankRetrieverBuilder fromXContent( "[text_similarity_reranker] retriever composition feature is not supported by all nodes in the cluster" ); } - if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(licenceState) == false) { + if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { throw LicenseUtils.newComplianceException(TextSimilarityRankBuilder.NAME); } return PARSER.apply(parser, context); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java index 431a3647e2879..bc2daddc2a346 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java @@ -9,9 +9,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; -import java.util.ArrayList; import java.util.List; public class ElasticInferenceServiceSettings { @@ -19,8 +17,6 @@ public class ElasticInferenceServiceSettings { @Deprecated static final Setting EIS_GATEWAY_URL = Setting.simpleString("xpack.inference.eis.gateway.url", Setting.Property.NodeScope); - public static final String ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX = "xpack.inference.elastic.http.ssl."; - static final Setting ELASTIC_INFERENCE_SERVICE_URL = Setting.simpleString( "xpack.inference.elastic.url", Setting.Property.NodeScope @@ -35,27 +31,11 @@ public class ElasticInferenceServiceSettings { public ElasticInferenceServiceSettings(Settings settings) { eisGatewayUrl = EIS_GATEWAY_URL.get(settings); elasticInferenceServiceUrl = ELASTIC_INFERENCE_SERVICE_URL.get(settings); - } - - public static final SSLConfigurationSettings ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_SETTINGS = SSLConfigurationSettings.withPrefix( - ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX, - false - ); - public static final Setting ELASTIC_INFERENCE_SERVICE_SSL_ENABLED = Setting.boolSetting( - ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX + "enabled", - true, - Setting.Property.NodeScope - ); + } public static List> getSettingsDefinitions() { - ArrayList> settings = new ArrayList<>(); - settings.add(EIS_GATEWAY_URL); - settings.add(ELASTIC_INFERENCE_SERVICE_URL); - settings.add(ELASTIC_INFERENCE_SERVICE_SSL_ENABLED); - settings.addAll(ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_SETTINGS.getEnabledSettings()); - - return settings; + return List.of(EIS_GATEWAY_URL, ELASTIC_INFERENCE_SERVICE_URL); } @Deprecated diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java index 
61033a0211065..bfec2d5ac3484 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.inference.InferencePlugin; import org.hamcrest.Matchers; @@ -29,7 +28,7 @@ public class SemanticTextClusterMetadataTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(XPackPlugin.class, InferencePlugin.class); + return List.of(InferencePlugin.class); } public void testCreateIndexWithSemanticTextField() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java deleted file mode 100644 index d1db5b8b12cc6..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.inference; - -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings; -import org.junit.After; -import org.junit.Before; - -import static org.hamcrest.Matchers.is; - -public class InferencePluginTests extends ESTestCase { - private InferencePlugin inferencePlugin; - - private Boolean elasticInferenceServiceEnabled = true; - - private void setElasticInferenceServiceEnabled(Boolean elasticInferenceServiceEnabled) { - this.elasticInferenceServiceEnabled = elasticInferenceServiceEnabled; - } - - @Before - public void setUp() throws Exception { - super.setUp(); - - Settings settings = Settings.builder().build(); - inferencePlugin = new InferencePlugin(settings) { - @Override - protected Boolean isElasticInferenceServiceEnabled() { - return elasticInferenceServiceEnabled; - } - }; - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - } - - public void testElasticInferenceServiceSettingsPresent() throws Exception { - setElasticInferenceServiceEnabled(true); // enable elastic inference service - boolean anyMatch = inferencePlugin.getSettings() - .stream() - .map(Setting::getKey) - .anyMatch(key -> key.startsWith(ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX)); - - assertThat("xpack.inference.elastic settings are present", anyMatch, is(true)); - } - - public void testElasticInferenceServiceSettingsNotPresent() throws Exception { - setElasticInferenceServiceEnabled(false); // disable elastic inference service - boolean noneMatch = inferencePlugin.getSettings() - .stream() - .map(Setting::getKey) - .noneMatch(key -> key.startsWith(ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX)); - - assertThat("xpack.inference.elastic settings are not present", noneMatch, is(true)); - } -} diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java deleted file mode 100644 index 68ea175bd9870..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.inference; - -import org.elasticsearch.action.support.MappedActionFilter; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.inference.InferenceServiceExtension; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; -import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; - -import java.nio.file.Path; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import static java.util.stream.Collectors.toList; - -public class LocalStateInferencePlugin extends LocalStateCompositeXPackPlugin { - private final InferencePlugin inferencePlugin; - - public LocalStateInferencePlugin(final Settings settings, final Path configPath) throws Exception { - super(settings, configPath); - LocalStateInferencePlugin thisVar = this; - this.inferencePlugin = new InferencePlugin(settings) { - @Override - protected SSLService getSslService() { - return thisVar.getSslService(); - } - - @Override - protected XPackLicenseState getLicenseState() { - return thisVar.getLicenseState(); - } - - @Override - public List getInferenceServiceFactories() { - return List.of( - TestSparseInferenceServiceExtension.TestInferenceService::new, - TestDenseInferenceServiceExtension.TestInferenceService::new - ); - } - }; - plugins.add(inferencePlugin); - } - - @Override - public List> getRetrievers() { - return this.filterPlugins(SearchPlugin.class).stream().flatMap(p -> p.getRetrievers().stream()).collect(toList()); - } - - @Override - public Map getMappers() { - return inferencePlugin.getMappers(); - } - - @Override - public Collection getMappedActionFilters() { - return inferencePlugin.getMappedActionFilters(); - } - -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index 0f322e64755be..9395ae222e9ba 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; @@ -142,6 +143,20 @@ private static void blockingCall( latch.await(); } + 
public static class TestInferencePlugin extends InferencePlugin { + public TestInferencePlugin(Settings settings) { + super(settings); + } + + @Override + public List getInferenceServiceFactories() { + return List.of( + TestSparseInferenceServiceExtension.TestInferenceService::new, + TestDenseInferenceServiceExtension.TestInferenceService::new + ); + } + } + public static Model getInvalidModel(String inferenceEntityId, String serviceName) { var mockConfigs = mock(ModelConfigurations.class); when(mockConfigs.getInferenceEntityId()).thenReturn(inferenceEntityId); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java index 24183b21f73e7..1f58c4165056d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.index.mapper.NonDynamicFieldMapperTests; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; import org.junit.Before; @@ -27,7 +26,7 @@ public void setup() throws Exception { @Override protected Collection> getPlugins() { - return List.of(LocalStateInferencePlugin.class); + return List.of(Utils.TestInferencePlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java index daed03c198e0d..6d6403b69ea11 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.rank.RankBuilder; import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import java.util.Collection; import java.util.List; @@ -40,7 +40,7 @@ protected RankBuilder getThrowingRankBuilder(int rankWindowSize, String rankFeat @Override protected Collection> pluginsNeeded() { - return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); + return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class); } public void testQueryPhaseShardThrowingAllShardsFail() throws Exception { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java index ba6924ba0ff3b..084a7f3de4a53 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java @@ -24,7 +24,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.junit.Before; import java.io.IOException; @@ -46,7 +47,7 @@ protected boolean addMockHttpTransport() { @Override protected Collection> nodePlugins() { - return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); + return List.of(InferencePlugin.class, XPackPlugin.class, TextSimilarityTestPlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java index f81f2965c392e..a042fca44fdb5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xpack.core.inference.action.InferenceAction; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.junit.Before; import java.util.Collection; @@ -108,7 +108,7 @@ protected InferenceAction.Request generateRequest(List docFeatures) { @Override protected Collection> getPlugins() { - return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); + return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class); } @Before diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java index ff1a1d19779df..bab012afc3101 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.monitoring.Monitoring; import org.elasticsearch.xpack.security.Security; @@ -87,12 +86,6 @@ protected XPackLicenseState getLicenseState() { } }); plugins.add(new MockedRollupPlugin()); - plugins.add(new InferencePlugin(settings) { - @Override - protected SSLService getSslService() { - return thisVar.getSslService(); - } - }); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 5cf15454e47f2..aeebfabdce704 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -82,6 +82,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -160,7 +161,8 @@ protected Collection> nodePlugins() { DataStreamsPlugin.class, // To remove errors from parsing build in templates that contain scaled_float MapperExtrasPlugin.class, - Wildcard.class + Wildcard.class, + InferencePlugin.class ); } diff --git a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java index 057ebdece5c61..ab5be0f48f5f3 100644 --- a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java +++ b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.frozen.FrozenIndices; import org.elasticsearch.xpack.graph.Graph; import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.profiling.ProfilingPlugin; import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.search.AsyncSearch; @@ -88,6 +89,7 @@ protected Collection> getPlugins() { FrozenIndices.class, Graph.class, IndexLifecycle.class, + InferencePlugin.class, IngestCommonPlugin.class, IngestTestPlugin.class, MustachePlugin.class, From cf73860b583e14c11eab28197b15b247236e4021 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 18 Dec 2024 15:57:12 +0000 Subject: [PATCH 02/62] Revert "Remove pre-7.2 token serialization support (#118057)" (#118967) * Revert "Remove pre-7.2 token serialization support (#118057)" This reverts commit ec66857ca13e2f5e7f9088a30aa48ea5ddab17fa. 
* Add missing constant --- .../org/elasticsearch/TransportVersions.java | 3 + .../security/SecurityFeatureSetUsage.java | 12 +- .../support/TokensInvalidationResult.java | 6 + .../security/authc/TokenAuthIntegTests.java | 37 +-- .../xpack/security/authc/TokenService.java | 236 ++++++++++++----- .../security/authc/TokenServiceTests.java | 241 +++++++++++++++++- 6 files changed, 447 insertions(+), 88 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 388123e86c882..fd8a3987cf4d3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -52,7 +52,10 @@ static TransportVersion def(int id) { @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); + public static final TransportVersion V_7_1_0 = def(7_01_00_99); + public static final TransportVersion V_7_2_0 = def(7_02_00_99); public static final TransportVersion V_7_3_0 = def(7_03_00_99); + public static final TransportVersion V_7_3_2 = def(7_03_02_99); public static final TransportVersion V_7_4_0 = def(7_04_00_99); public static final TransportVersion V_7_6_0 = def(7_06_00_99); public static final TransportVersion V_7_7_0 = def(7_07_00_99); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index f44409daa37f8..3ebfad04a0f13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -55,8 +55,10 @@ public SecurityFeatureSetUsage(StreamInput in) throws IOException { realmsUsage = in.readGenericMap(); rolesStoreUsage = in.readGenericMap(); sslUsage = in.readGenericMap(); - tokenServiceUsage = in.readGenericMap(); - apiKeyServiceUsage = in.readGenericMap(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { + tokenServiceUsage = in.readGenericMap(); + apiKeyServiceUsage = in.readGenericMap(); + } auditUsage = in.readGenericMap(); ipFilterUsage = in.readGenericMap(); anonymousUsage = in.readGenericMap(); @@ -121,8 +123,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(realmsUsage); out.writeGenericMap(rolesStoreUsage); out.writeGenericMap(sslUsage); - out.writeGenericMap(tokenServiceUsage); - out.writeGenericMap(apiKeyServiceUsage); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { + out.writeGenericMap(tokenServiceUsage); + out.writeGenericMap(apiKeyServiceUsage); + } out.writeGenericMap(auditUsage); out.writeGenericMap(ipFilterUsage); out.writeGenericMap(anonymousUsage); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java index 59c16fc8a7a72..8fe018a825468 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java @@ -59,6 +59,9 @@ public TokensInvalidationResult(StreamInput in) throws IOException { this.invalidatedTokens = in.readStringCollectionAsList(); this.previouslyInvalidatedTokens = in.readStringCollectionAsList(); this.errors = in.readCollectionAsList(StreamInput::readException); + if (in.getTransportVersion().before(TransportVersions.V_7_2_0)) { + in.readVInt(); + } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.restStatus = RestStatus.readFrom(in); } @@ -108,6 +111,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(invalidatedTokens); out.writeStringCollection(previouslyInvalidatedTokens); out.writeCollection(errors, StreamOutput::writeException); + if (out.getTransportVersion().before(TransportVersions.V_7_2_0)) { + out.writeVInt(5); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { RestStatus.writeTo(out, restStatus); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index b56ea7ae3e456..fef1a98ca67e9 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -327,8 +327,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { ResponseException.class, () -> invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, randomBoolean()).v1() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() ) ) ); @@ -347,7 +347,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] longerAccessToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersions.MINIMUM_COMPATIBLE, longerAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, longerAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -365,7 +365,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] shorterAccessToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersions.MINIMUM_COMPATIBLE, shorterAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, shorterAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -394,8 +394,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { invalidateResponse = invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, randomBoolean()).v1() + TransportVersions.V_7_3_2, + 
tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -420,8 +420,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { ResponseException.class, () -> invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, true).v2() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() ) ) ); @@ -441,7 +441,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] longerRefreshToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.MINIMUM_COMPATIBLE, longerRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, longerRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -459,7 +459,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] shorterRefreshToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.MINIMUM_COMPATIBLE, shorterRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, shorterRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -488,8 +488,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { invalidateResponse = invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, true).v2() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -758,11 +758,18 @@ public void testAuthenticateWithWrongToken() throws Exception { assertAuthenticateWithToken(response.accessToken(), TEST_USER_NAME); // Now attempt to authenticate with an invalid access token string assertUnauthorizedToken(randomAlphaOfLengthBetween(0, 128)); - // Now attempt to authenticate with an invalid access token with valid structure (after 8.0 pre 8.10) + // Now attempt to authenticate with an invalid access token with valid structure (pre 7.2) assertUnauthorizedToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.V_8_0_0, - tokenService.getRandomTokenBytes(TransportVersions.V_8_0_0, randomBoolean()).v1() + TransportVersions.V_7_1_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() + ) + ); + // Now attempt to authenticate with an invalid access token with valid structure (after 7.2 pre 8.10) + assertUnauthorizedToken( + tokenService.prependVersionAndEncodeAccessToken( + TransportVersions.V_7_4_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_4_0, randomBoolean()).v1() ) ); // Now attempt to authenticate with an invalid access token with valid structure (current version) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 900436a1fd874..4f7ba7808b823 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -48,7 +48,9 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -57,6 +59,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -90,8 +93,10 @@ import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.IOException; +import java.io.OutputStream; import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -127,6 +132,7 @@ import javax.crypto.Cipher; import javax.crypto.CipherInputStream; +import javax.crypto.CipherOutputStream; import javax.crypto.NoSuchPaddingException; import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; @@ -195,8 +201,14 @@ public class TokenService { // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; + static final int LEGACY_MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; + static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); public static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); + static final TransportVersion VERSION_HASHED_TOKENS = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_TOKENS_INDEX_INTRODUCED = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_10_X; @@ -261,7 +273,8 @@ public TokenService( /** * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with - * auto-generated values. The created tokens are stored a specific security tokens index. + * auto-generated values. The created tokens are stored in the security index for versions up to + * {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a specific security tokens index for later versions. 
*/ public void createOAuth2Tokens( Authentication authentication, @@ -278,7 +291,8 @@ public void createOAuth2Tokens( /** * Creates an access token and optionally a refresh token as well from predefined values, based on the provided authentication and - * metadata. The created tokens are stored in a specific security tokens index. + * metadata. The created tokens are stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a + * specific security tokens index for later versions. */ // public for testing public void createOAuth2Tokens( @@ -300,15 +314,21 @@ public void createOAuth2Tokens( * * @param accessTokenBytes The predefined seed value for the access token. This will then be *
                        <ul>
-     *                             <li>Hashed before stored</li>
-     *                             <li>Stored in a specific security tokens index</li>
+     *                             <li>Encrypted before stored for versions before {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                             <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                             <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                             <li>Stored in a specific security tokens index for versions after
+     *                             {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
      *                             <li>Prepended with a version ID and Base64 encoded before returned to the caller of the APIs</li>
      *                         </ul>
      * @param refreshTokenBytes The predefined seed value for the refresh token. This will then be
      *                          <ul>
-     *                              <li>Hashed before stored</li>
-     *                              <li>Stored in a specific security tokens index</li>
-     *                              <li>Prepended with a version ID and Base64 encoded before returned to the caller of the APIs</li>
+     *                              <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                              <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                              <li>Stored in a specific security tokens index for versions after
+     *                              {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                              <li>Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs
+     *                              for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
      *                          </ul>
* @param tokenVersion The version of the nodes with which these tokens will be compatible. * @param authentication The authentication object representing the user for which the tokens are created @@ -364,7 +384,7 @@ private void createOAuth2Tokens( } else { refreshTokenToStore = refreshTokenToReturn = null; } - } else { + } else if (tokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { assert accessTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; userTokenId = hashTokenString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); accessTokenToStore = null; @@ -375,6 +395,18 @@ private void createOAuth2Tokens( } else { refreshTokenToStore = refreshTokenToReturn = null; } + } else { + assert accessTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; + userTokenId = Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes); + accessTokenToStore = null; + if (refreshTokenBytes != null) { + assert refreshTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; + refreshTokenToStore = refreshTokenToReturn = Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString( + refreshTokenBytes + ); + } else { + refreshTokenToStore = refreshTokenToReturn = null; + } } UserToken userToken = new UserToken(userTokenId, tokenVersion, tokenAuth, getExpirationTime(), metadata); tokenDocument = createTokenDocument(userToken, accessTokenToStore, refreshTokenToStore, originatingClientAuth); @@ -387,22 +419,23 @@ private void createOAuth2Tokens( final RefreshPolicy tokenCreationRefreshPolicy = tokenVersion.onOrAfter(VERSION_GET_TOKEN_DOC_FOR_REFRESH) ? RefreshPolicy.NONE : RefreshPolicy.WAIT_UNTIL; + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); logger.debug( () -> format( "Using refresh policy [%s] when creating token doc [%s] in the security index [%s]", tokenCreationRefreshPolicy, documentId, - securityTokensIndex.aliasName() + tokensIndex.aliasName() ) ); - final IndexRequest indexTokenRequest = client.prepareIndex(securityTokensIndex.aliasName()) + final IndexRequest indexTokenRequest = client.prepareIndex(tokensIndex.aliasName()) .setId(documentId) .setOpType(OpType.CREATE) .setSource(tokenDocument, XContentType.JSON) .setRefreshPolicy(tokenCreationRefreshPolicy) .request(); - securityTokensIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare tokens index [" + securityTokensIndex.aliasName() + "]", documentId, ex)), + tokensIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare tokens index [" + tokensIndex.aliasName() + "]", documentId, ex)), () -> executeAsyncWithOrigin( client, SECURITY_ORIGIN, @@ -521,16 +554,17 @@ private void getTokenDocById( @Nullable String storedRefreshToken, ActionListener listener ) { - final SecurityIndexManager frozenTokensIndex = securityTokensIndex.defensiveCopy(); + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); + final SecurityIndexManager frozenTokensIndex = tokensIndex.defensiveCopy(); if (frozenTokensIndex.isAvailable(PRIMARY_SHARDS) == false) { - logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, securityTokensIndex.aliasName()); + logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); listener.onFailure(frozenTokensIndex.getUnavailableReason(PRIMARY_SHARDS)); return; } - final GetRequest getRequest = client.prepareGet(securityTokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); + final GetRequest getRequest = 
client.prepareGet(tokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); final Consumer onFailure = ex -> listener.onFailure(traceLog("get token from id", tokenId, ex)); - securityTokensIndex.checkIndexVersionThenExecute( - ex -> listener.onFailure(traceLog("prepare tokens index [" + securityTokensIndex.aliasName() + "]", tokenId, ex)), + tokensIndex.checkIndexVersionThenExecute( + ex -> listener.onFailure(traceLog("prepare tokens index [" + tokensIndex.aliasName() + "]", tokenId, ex)), () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, @@ -576,11 +610,7 @@ private void getTokenDocById( // if the index or the shard is not there / available we assume that // the token is not valid if (isShardNotAvailableException(e)) { - logger.warn( - "failed to get token doc [{}] because index [{}] is not available", - tokenId, - securityTokensIndex.aliasName() - ); + logger.warn("failed to get token doc [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); } else { logger.error(() -> "failed to get token doc [" + tokenId + "]", e); } @@ -620,7 +650,7 @@ void decodeToken(String token, boolean validateUserToken, ActionListener VERSION_ACCESS_TOKENS_UUIDS cluster if (in.available() < MINIMUM_BYTES) { logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BYTES); @@ -630,6 +660,41 @@ void decodeToken(String token, boolean validateUserToken, ActionListener { + if (decodeKey != null) { + try { + final Cipher cipher = getDecryptionCipher(iv, decodeKey, version, decodedSalt); + final String tokenId = decryptTokenId(encryptedTokenId, cipher, version); + getAndValidateUserToken(tokenId, version, null, validateUserToken, listener); + } catch (IOException | GeneralSecurityException e) { + // could happen with a token that is not ours + logger.warn("invalid token", e); + listener.onResponse(null); + } + } else { + // could happen with a token that is not ours + listener.onResponse(null); + } + }, listener::onFailure)); + } else { + logger.debug(() -> format("invalid key %s key: %s", passphraseHash, keyCache.cache.keySet())); + listener.onResponse(null); + } } } catch (Exception e) { // could happen with a token that is not ours @@ -787,7 +852,11 @@ private void indexInvalidation( final Set idsOfOlderTokens = new HashSet<>(); boolean anyOlderTokensBeforeRefreshViaGet = false; for (UserToken userToken : userTokens) { - idsOfRecentTokens.add(userToken.getId()); + if (userToken.getTransportVersion().onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { + idsOfRecentTokens.add(userToken.getId()); + } else { + idsOfOlderTokens.add(userToken.getId()); + } anyOlderTokensBeforeRefreshViaGet |= userToken.getTransportVersion().before(VERSION_GET_TOKEN_DOC_FOR_REFRESH); } final RefreshPolicy tokensInvalidationRefreshPolicy = anyOlderTokensBeforeRefreshViaGet @@ -1055,7 +1124,7 @@ private void findTokenFromRefreshToken(String refreshToken, Iterator ); getTokenDocById(userTokenId, version, null, storedRefreshToken, listener); } - } else { + } else if (version.onOrAfter(VERSION_HASHED_TOKENS)) { final String unencodedRefreshToken = in.readString(); if (unencodedRefreshToken.length() != TOKEN_LENGTH) { logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, version); @@ -1064,6 +1133,9 @@ private void findTokenFromRefreshToken(String refreshToken, Iterator final String hashedRefreshToken = hashTokenString(unencodedRefreshToken); findTokenFromRefreshToken(hashedRefreshToken, securityTokensIndex, backoff, listener); } + } else 
{ + logger.debug("Unrecognized refresh token version [{}].", version); + listener.onResponse(null); } } catch (IOException e) { logger.debug(() -> "Could not decode refresh token [" + refreshToken + "].", e); @@ -1178,6 +1250,7 @@ private void innerRefresh( return; } final RefreshTokenStatus refreshTokenStatus = checkRefreshResult.v1(); + final SecurityIndexManager refreshedTokenIndex = getTokensIndexForVersion(refreshTokenStatus.getTransportVersion()); if (refreshTokenStatus.isRefreshed()) { logger.debug( "Token document [{}] was recently refreshed, when a new token document was generated. Reusing that result.", @@ -1185,29 +1258,31 @@ private void innerRefresh( ); final Tuple parsedTokens = parseTokensFromDocument(tokenDoc.sourceAsMap(), null); Authentication authentication = parsedTokens.v1().getAuthentication(); - decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, securityTokensIndex, authentication, listener); + decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, refreshedTokenIndex, authentication, listener); } else { final TransportVersion newTokenVersion = getTokenVersionCompatibility(); final Tuple newTokenBytes = getRandomTokenBytes(newTokenVersion, true); final Map updateMap = new HashMap<>(); updateMap.put("refreshed", true); - updateMap.put("refresh_time", clock.instant().toEpochMilli()); - try { - final byte[] iv = getRandomBytes(IV_BYTES); - final byte[] salt = getRandomBytes(SALT_BYTES); - String encryptedAccessAndRefreshToken = encryptSupersedingTokens( - newTokenBytes.v1(), - newTokenBytes.v2(), - refreshToken, - iv, - salt - ); - updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); - updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); - updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); - } catch (GeneralSecurityException e) { - logger.warn("could not encrypt access token and refresh token string", e); - onFailure.accept(invalidGrantException("could not refresh the requested token")); + if (newTokenVersion.onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + updateMap.put("refresh_time", clock.instant().toEpochMilli()); + try { + final byte[] iv = getRandomBytes(IV_BYTES); + final byte[] salt = getRandomBytes(SALT_BYTES); + String encryptedAccessAndRefreshToken = encryptSupersedingTokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + refreshToken, + iv, + salt + ); + updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); + updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); + updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); + } catch (GeneralSecurityException e) { + logger.warn("could not encrypt access token and refresh token string", e); + onFailure.accept(invalidGrantException("could not refresh the requested token")); + } } assert tokenDoc.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned sequence number"; assert tokenDoc.primaryTerm() != SequenceNumbers.UNASSIGNED_PRIMARY_TERM : "expected an assigned primary term"; @@ -1218,17 +1293,17 @@ private void innerRefresh( "Using refresh policy [%s] when updating token doc [%s] for refresh in the security index [%s]", tokenRefreshUpdateRefreshPolicy, tokenDoc.id(), - securityTokensIndex.aliasName() + refreshedTokenIndex.aliasName() ) ); - final UpdateRequestBuilder updateRequest = client.prepareUpdate(securityTokensIndex.aliasName(), tokenDoc.id()) + final UpdateRequestBuilder 
updateRequest = client.prepareUpdate(refreshedTokenIndex.aliasName(), tokenDoc.id()) .setDoc("refresh_token", updateMap) .setFetchSource(logger.isDebugEnabled()) .setRefreshPolicy(tokenRefreshUpdateRefreshPolicy) .setIfSeqNo(tokenDoc.seqNo()) .setIfPrimaryTerm(tokenDoc.primaryTerm()); - securityTokensIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare index [" + securityTokensIndex.aliasName() + "]", ex)), + refreshedTokenIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare index [" + refreshedTokenIndex.aliasName() + "]", ex)), () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, @@ -1274,7 +1349,7 @@ private void innerRefresh( if (cause instanceof VersionConflictEngineException) { // The document has been updated by another thread, get it again. logger.debug("version conflict while updating document [{}], attempting to get it again", tokenDoc.id()); - getTokenDocAsync(tokenDoc.id(), securityTokensIndex, true, new ActionListener<>() { + getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, new ActionListener<>() { @Override public void onResponse(GetResponse response) { if (response.isExists()) { @@ -1293,7 +1368,7 @@ public void onFailure(Exception e) { logger.info("could not get token document [{}] for refresh, retrying", tokenDoc.id()); client.threadPool() .schedule( - () -> getTokenDocAsync(tokenDoc.id(), securityTokensIndex, true, this), + () -> getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, this), backoff.next(), client.threadPool().generic() ); @@ -1614,13 +1689,17 @@ private static Optional checkMultipleRefreshes( RefreshTokenStatus refreshTokenStatus ) { if (refreshTokenStatus.isRefreshed()) { - if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { - return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); - } - if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { - return Optional.of( - invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great") - ); + if (refreshTokenStatus.getTransportVersion().onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { + return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); + } + if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { + return Optional.of( + invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great") + ); + } + } else { + return Optional.of(invalidGrantException("token has already been refreshed")); } } return Optional.empty(); @@ -1900,6 +1979,21 @@ private void ensureEnabled() { } } + /** + * In version {@code #VERSION_TOKENS_INDEX_INTRODUCED} security tokens were moved into a separate index, away from the other entities in + * the main security index, due to their ephemeral nature. They moved "seamlessly" - without manual user intervention. In this way, new + * tokens are created in the new index, while the existing ones were left in place - to be accessed from the old index - and due to be + * removed automatically by the {@code ExpiredTokenRemover} periodic job. Therefore, in general, when searching for a token we need to + * consider both the new and the old indices. 
+ */ + private SecurityIndexManager getTokensIndexForVersion(TransportVersion version) { + if (version.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { + return securityTokensIndex; + } else { + return securityMainIndex; + } + } + public TimeValue getExpirationDelay() { return expirationDelay; } @@ -1928,13 +2022,41 @@ public String prependVersionAndEncodeAccessToken(TransportVersion version, byte[ out.writeByteArray(accessTokenBytes); return Base64.getEncoder().encodeToString(out.bytes().toBytesRef().bytes); } - } else { + } else if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { try (BytesStreamOutput out = new BytesStreamOutput(MINIMUM_BASE64_BYTES)) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); out.writeString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); return Base64.getEncoder().encodeToString(out.bytes().toBytesRef().bytes); } + } else { + // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly + try ( + ByteArrayOutputStream os = new ByteArrayOutputStream(LEGACY_MINIMUM_BASE64_BYTES); + OutputStream base64 = Base64.getEncoder().wrap(os); + StreamOutput out = new OutputStreamStreamOutput(base64) + ) { + out.setTransportVersion(version); + KeyAndCache keyAndCache = keyCache.activeKeyCache; + TransportVersion.writeVersion(version, out); + out.writeByteArray(keyAndCache.getSalt().bytes); + out.writeByteArray(keyAndCache.getKeyHash().bytes); + final byte[] initializationVector = getRandomBytes(IV_BYTES); + out.writeByteArray(initializationVector); + try ( + CipherOutputStream encryptedOutput = new CipherOutputStream( + out, + getEncryptionCipher(initializationVector, keyAndCache, version) + ); + StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput) + ) { + encryptedStreamOutput.setTransportVersion(version); + encryptedStreamOutput.writeString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); + // StreamOutput needs to be closed explicitly because it wraps CipherOutputStream + encryptedStreamOutput.close(); + return new String(os.toByteArray(), StandardCharsets.UTF_8); + } + } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 702af75141093..75c2507a1dc5f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -126,6 +126,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -147,6 +148,7 @@ public class TokenServiceTests extends ESTestCase { private SecurityIndexManager securityMainIndex; private SecurityIndexManager securityTokensIndex; private ClusterService clusterService; + private DiscoveryNode pre72OldNode; private DiscoveryNode pre8500040OldNode; private Settings tokenServiceEnabledSettings = Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) @@ -226,12 +228,31 @@ public void setupClient() { licenseState = mock(MockLicenseState.class); 
when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true); + if (randomBoolean()) { + // version 7.2 was an "inflection" point in the Token Service development (access_tokens as UUIDS, multiple concurrent + // refreshes, + // tokens docs on a separate index) + pre72OldNode = addAnother7071DataNode(this.clusterService); + } if (randomBoolean()) { // before refresh tokens used GET, i.e. TokenService#VERSION_GET_TOKEN_DOC_FOR_REFRESH pre8500040OldNode = addAnotherPre8500DataNode(this.clusterService); } } + private static DiscoveryNode addAnother7071DataNode(ClusterService clusterService) { + Version version; + TransportVersion transportVersion; + if (randomBoolean()) { + version = Version.V_7_0_0; + transportVersion = TransportVersions.V_7_0_0; + } else { + version = Version.V_7_1_0; + transportVersion = TransportVersions.V_7_1_0; + } + return addAnotherDataNodeWithVersion(clusterService, version, transportVersion); + } + private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterService) { Version version; TransportVersion transportVersion; @@ -280,6 +301,53 @@ public static void shutdownThreadpool() { threadPool = null; } + public void testAttachAndGetToken() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Token Service Key is used (to encrypt tokens) + if (null == pre72OldNode) { + pre72OldNode = addAnother7071DataNode(this.clusterService); + } + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + PlainActionFuture tokenFuture = new PlainActionFuture<>(); + Tuple newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean()); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + final String accessToken = tokenFuture.get().getAccessToken(); + assertNotNull(accessToken); + mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + accessToken); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + UserToken serialized = future.get(); + assertAuthentication(authentication, serialized.getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + // verify a second separate token service with its own salt can also verify + TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + anotherService.refreshMetadata(tokenService.getTokenMetadata()); + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + anotherService.tryAuthenticateToken(bearerToken, future); + UserToken fromOtherService = future.get(); + assertAuthentication(authentication, fromOtherService.getAuthentication()); + } + } + public void 
testInvalidAuthorizationHeader() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); @@ -296,6 +364,89 @@ public void testInvalidAuthorizationHeader() throws Exception { } } + public void testPassphraseWorks() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used + if (null == pre72OldNode) { + pre72OldNode = addAnother7071DataNode(this.clusterService); + } + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + PlainActionFuture tokenFuture = new PlainActionFuture<>(); + Tuple newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean()); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + final String accessToken = tokenFuture.get().getAccessToken(); + assertNotNull(accessToken); + mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + storeTokenHeader(requestContext, accessToken); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + UserToken serialized = future.get(); + assertAuthentication(authentication, serialized.getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + // verify a second separate token service with its own passphrase cannot verify + TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + anotherService.tryAuthenticateToken(bearerToken, future); + assertNull(future.get()); + } + } + + public void testGetTokenWhenKeyCacheHasExpired() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used + if (null == pre72OldNode) { + pre72OldNode = addAnother7071DataNode(this.clusterService); + } + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + + PlainActionFuture tokenFuture = new PlainActionFuture<>(); + Tuple newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean()); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + String accessToken = tokenFuture.get().getAccessToken(); + assertThat(accessToken, notNullValue()); + + tokenService.clearActiveKeyCache(); + + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + 
accessToken = tokenFuture.get().getAccessToken(); + assertThat(accessToken, notNullValue()); + } + public void testAuthnWithInvalidatedToken() throws Exception { when(securityMainIndex.indexExists()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); @@ -669,6 +820,57 @@ public void testMalformedRefreshTokens() throws Exception { } } + public void testNonExistingPre72Token() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // mock another random token so that we don't find a token in TokenService#getUserTokenFromId + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null); + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + storeTokenHeader( + requestContext, + tokenService.prependVersionAndEncodeAccessToken( + TransportVersions.V_7_1_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() + ) + ); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + assertNull(future.get()); + } + } + + public void testNonExistingUUIDToken() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // mock another random token so that we don't find a token in TokenService#getUserTokenFromId + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null); + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + TransportVersion uuidTokenVersion = randomFrom(TransportVersions.V_7_2_0, TransportVersions.V_7_3_2); + storeTokenHeader( + requestContext, + tokenService.prependVersionAndEncodeAccessToken( + uuidTokenVersion, + tokenService.getRandomTokenBytes(uuidTokenVersion, randomBoolean()).v1() + ) + ); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + assertNull(future.get()); + } + } + public void testNonExistingLatestTokenVersion() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // mock another random token so that we don't find a token in TokenService#getUserTokenFromId @@ -723,11 +925,18 @@ public void testIndexNotAvailable() throws Exception { return Void.TYPE; }).when(client).get(any(GetRequest.class), anyActionListener()); - final SecurityIndexManager tokensIndex = securityTokensIndex; - when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); - when(securityMainIndex.indexExists()).thenReturn(false); - 
when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex); - + final SecurityIndexManager tokensIndex; + if (pre72OldNode != null) { + tokensIndex = securityMainIndex; + when(securityTokensIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(securityTokensIndex.indexExists()).thenReturn(false); + when(securityTokensIndex.defensiveCopy()).thenReturn(securityTokensIndex); + } else { + tokensIndex = securityTokensIndex; + when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(securityMainIndex.indexExists()).thenReturn(false); + when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex); + } try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { PlainActionFuture future = new PlainActionFuture<>(); final SecureString bearerToken3 = Authenticator.extractBearerTokenFromHeader(requestContext); @@ -779,6 +988,7 @@ public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception { } public void testSupersedingTokenEncryption() throws Exception { + assumeTrue("Superseding tokens are only created in post 7.2 clusters", pre72OldNode == null); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = AuthenticationTests.randomAuthentication(null, null); PlainActionFuture tokenFuture = new PlainActionFuture<>(); @@ -813,11 +1023,13 @@ public void testSupersedingTokenEncryption() throws Exception { authentication, tokenFuture ); - - assertThat( - tokenService.prependVersionAndEncodeAccessToken(version, newTokenBytes.v1()), - equalTo(tokenFuture.get().getAccessToken()) - ); + if (version.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + // previous versions serialized the access token encrypted and the cipher text was different each time (due to different IVs) + assertThat( + tokenService.prependVersionAndEncodeAccessToken(version, newTokenBytes.v1()), + equalTo(tokenFuture.get().getAccessToken()) + ); + } assertThat( TokenService.prependVersionAndEncodeRefreshToken(version, newTokenBytes.v2()), equalTo(tokenFuture.get().getRefreshToken()) @@ -946,8 +1158,10 @@ public static String tokenDocIdFromAccessTokenBytes(byte[] accessTokenBytes, Tra MessageDigest userTokenIdDigest = sha256(); userTokenIdDigest.update(accessTokenBytes, RAW_TOKEN_BYTES_LENGTH, RAW_TOKEN_DOC_ID_BYTES_LENGTH); return Base64.getUrlEncoder().withoutPadding().encodeToString(userTokenIdDigest.digest()); - } else { + } else if (tokenVersion.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { return TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes)); + } else { + return Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes); } } @@ -964,9 +1178,12 @@ private void mockTokenForRefreshToken( if (userToken.getTransportVersion().onOrAfter(VERSION_GET_TOKEN_DOC_FOR_REFRESH)) { storedAccessToken = Base64.getUrlEncoder().withoutPadding().encodeToString(sha256().digest(accessTokenBytes)); storedRefreshToken = Base64.getUrlEncoder().withoutPadding().encodeToString(sha256().digest(refreshTokenBytes)); - } else { + } else if (userToken.getTransportVersion().onOrAfter(TokenService.VERSION_HASHED_TOKENS)) { storedAccessToken = null; storedRefreshToken = TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes)); + } else { + storedAccessToken = null; + storedRefreshToken = 
Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes); } final RealmRef realmRef = new RealmRef( refreshTokenStatus == null ? randomAlphaOfLength(6) : refreshTokenStatus.getAssociatedRealm(), From dadf875bdf2267e50b12cc6455b3ee1fa3365023 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 18 Dec 2024 18:11:14 +0100 Subject: [PATCH 03/62] Push removal of search workers pool setting to v10 (#118877) The search workers thread pool has been removed in 8.16. We still support parsing its size and queue size settings, to prevent issues upon upgrade for users that may have customized them. We will provide such compatibility for the entire 9.x series. --- .../main/java/org/elasticsearch/node/NodeConstruction.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 17e56a392daff..5cfe1c104d45e 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -83,7 +83,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -553,9 +553,10 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, return settingsModule; } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) + @UpdateForV10(owner = UpdateForV10.Owner.SEARCH_FOUNDATIONS) private static void addBwcSearchWorkerSettings(List> additionalSettings) { - // TODO remove the below settings, they are unused and only here to enable BwC for deployments that still use them + // Search workers thread pool has been removed in Elasticsearch 8.16.0. These settings are deprecated and take no effect. + // They are here only to enable BwC for deployments that still use them additionalSettings.add( Setting.intSetting("thread_pool.search_worker.queue_size", 0, Setting.Property.NodeScope, Setting.Property.DeprecatedWarning) ); From 76b2968360dc4e71309e3b43dc18e6f41a142651 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Wed, 18 Dec 2024 18:30:58 +0100 Subject: [PATCH 04/62] Disable SLM history in docs tests (#118979) The SLM history data stream was causing issues in the docs tests because its presence was flaky and could result in the inability to remove its index template, which in turn resulted in failing tests. 
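Expressed as node settings, the docs test clusters now run with both history features off. A minimal Java sketch of the equivalent configuration (illustrative only, not part of this patch; the two setting keys are exactly the ones the gradle change below touches):

    import org.elasticsearch.common.settings.Settings;

    // Both history data streams are disabled for the docs test clusters, so no
    // background ILM/SLM task can leave indices or templates behind between tests.
    Settings docsTestClusterSettings = Settings.builder()
        .put("indices.lifecycle.history_index_enabled", false) // ILM history, already disabled
        .put("slm.history_index_enabled", false)               // SLM history, disabled by this change
        .build();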
--- docs/build.gradle | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/build.gradle b/docs/build.gradle index dec0de8ffa844..93b7277327280 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -130,8 +130,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' - // disable the ILM history for doc tests to avoid potential lingering tasks that'd cause test flakiness + // disable the ILM and SLM history for doc tests to avoid potential lingering tasks that'd cause test flakiness setting 'indices.lifecycle.history_index_enabled', 'false' + setting 'slm.history_index_enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.authc.realms.file.file.order', '0' setting 'xpack.security.authc.realms.native.native.order', '1' From 5663efa5e344397d423eefd52d6a06e4fda9b70e Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 18 Dec 2024 18:38:54 +0100 Subject: [PATCH 05/62] Just mute the bad apple (#118989) --- muted-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/muted-tests.yml b/muted-tests.yml index f534f24718f52..a06334146ed7b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -289,6 +289,7 @@ tests: method: testThreadContext issue: https://github.com/elastic/elasticsearch/issues/118914 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + method: test {yaml=indices.create/20_synthetic_source/create index with use_synthetic_source} issue: https://github.com/elastic/elasticsearch/issues/118955 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118970 From e741fd62cd70016bc70c49b87def3397a4c48ed1 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 18 Dec 2024 19:30:36 +0100 Subject: [PATCH 06/62] [Build] Build hdfs fixture faster and less (#118801) Building the shadowed hdfs2 and hdfs3 fixtures takes quite a long time because the jars are 51 MB and 80 MB in size. By removing unused dependencies from the shadow jar creation we can speed this up significantly.
We also now avoid building the hdfs fixture jars for compile-only usage, resulting in no shadow jar creation for precommit checks. --- test/fixtures/hdfs-fixture/build.gradle | 81 ++++++++++++++----- .../searchable-snapshots/qa/hdfs/build.gradle | 3 +- .../qa/hdfs/build.gradle | 3 +- 3 files changed, 66 insertions(+), 21 deletions(-) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 9dc0263f49aee..8296bc14fd665 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -10,12 +10,10 @@ apply plugin: 'elasticsearch.java' apply plugin: 'com.gradleup.shadow' + import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar configurations { -// all { -// transitive = true -// } hdfs2 hdfs3 consumable("shadowedHdfs2") @@ -27,20 +25,76 @@ dependencies { transitive false } compileOnly "junit:junit:${versions.junit}" - hdfs2 "org.apache.hadoop:hadoop-minicluster:2.8.5" - hdfs3 "org.apache.hadoop:hadoop-minicluster:3.3.1" + def commonExcludes = [ + [group: "org.apache.commons", module: "commons-compress"], + [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-app"], + [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-core"], + [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-hs"], + [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-jobclient"], + [group: "org.apache.hadoop", module: "hadoop-yarn-server-tests"], + [group: "org.apache.httpcomponents", module: "httpclient"], + [group: "org.apache.zookeeper", module: "zookeeper"], + [group: "org.apache.curator", module: "curator-recipes"], + [group: "org.apache.curator", module: "curator-client"], + [group: "org.apache.curator", module: "curator-framework"], + [group: "org.apache.avro", module: "avro"], + [group: "log4j", module: "log4j"], + [group: "io.netty", module: "netty-all"], + [group: "io.netty", module: "netty"], + [group: "com.squareup.okhttp", module: "okhttp"], + [group: "com.google.guava", module: "guava"], + [group: "com.google.code.gson", module: "gson"], + [group: "javax.servlet.jsp", module: "jsp-api"], + [group: "org.fusesource.leveldbjni", module: "leveldbjni-all"], + [group: "commons-cli", module: "commons-cli"], + [group: "org.mortbay.jetty", module: "servlet-api"], + [group: "commons-logging", module: "commons-logging"], + [group: "org.slf4j", module: "slf4j-log4j12"], + [group: "commons-codec", module: "commons-codec"], + [group: "com.sun.jersey", module: "jersey-core"], + [group: "com.sun.jersey", module: "jersey-json"], + [group: "com.google.code.findbugs", module: "jsr305"], + [group: "com.sun.jersey", module: "jersey-json"], + [group: "com.nimbusds", module: "nimbus-jose-jwt"], + [group: "com.jcraft", module: "jsch"], + [group: "org.slf4j", module: "slf4j-api"], + ] + + hdfs2("org.apache.hadoop:hadoop-minicluster:2.8.5") { + commonExcludes.each { exclude it } + exclude group: "org.apache.commons", module: "commons-math3" + exclude group: "xmlenc", module: "xmlenc" + exclude group: "net.java.dev.jets3t", module: "jets3t" + exclude group: "org.apache.directory.server", module: "apacheds-i18n" + exclude group: "xerces", module: "xercesImpl" + } + + hdfs3("org.apache.hadoop:hadoop-minicluster:3.3.1") { + commonExcludes.each { exclude it } + exclude group: "dnsjava", module: "dnsjava" + exclude group: "com.google.inject.extensions", module: "guice-servlet" + exclude group: "com.google.inject", module: "guice" + exclude group: "com.microsoft.sqlserver", module: "mssql-jdbc" + exclude group:
"com.sun.jersey.contribs", module: "jersey-guice" + exclude group: "com.zaxxer", module: "HikariCP-java7" + exclude group: "com.sun.jersey", module: "jersey-server" + exclude group: "org.bouncycastle", module: "bcpkix-jdk15on" + exclude group: "org.bouncycastle", module: "bcprov-jdk15on" + exclude group: "org.ehcache", module: "ehcache" + exclude group: "org.apache.geronimo.specs", module: "geronimo-jcache_1.0_spec" + exclude group: "org.xerial.snappy", module: "snappy-java" + } } tasks.named("shadowJar").configure { archiveClassifier.set("hdfs3") // fix issues with signed jars - relocate("org.apache.hadoop", "fixture.hdfs3.org.apache.hadoop") { exclude "org.apache.hadoop.hdfs.protocol.ClientProtocol" exclude "org.apache.hadoop.ipc.StandbyException" } - configurations << project.configurations.hdfs3 + configurations.add(project.configurations.hdfs3) } def hdfs2Jar = tasks.register("hdfs2jar", ShadowJar) { @@ -50,26 +104,15 @@ def hdfs2Jar = tasks.register("hdfs2jar", ShadowJar) { } archiveClassifier.set("hdfs2") from sourceSets.main.output - configurations << project.configurations.hdfs2 + configurations.add(project.configurations.hdfs2) } tasks.withType(ShadowJar).configureEach { dependencies { -// exclude(dependency('commons-io:commons-io:2.8.0')) exclude(dependency("com.carrotsearch.randomizedtesting:randomizedtesting-runner:.*")) exclude(dependency("junit:junit:.*")) - exclude(dependency("org.slf4j:slf4j-api:.*")) - exclude(dependency("com.google.guava:guava:.*")) - exclude(dependency("org.apache.commons:commons-compress:.*")) - exclude(dependency("commons-logging:commons-logging:.*")) - exclude(dependency("commons-codec:commons-codec:.*")) - exclude(dependency("org.apache.httpcomponents:httpclient:.*")) exclude(dependency("org.apache.httpcomponents:httpcore:.*")) exclude(dependency("org.apache.logging.log4j:log4j-1.2-api:.*")) - exclude(dependency("log4j:log4j:.*")) - exclude(dependency("io.netty:.*:.*")) - exclude(dependency("com.nimbusds:nimbus-jose-jwt:.*")) - exclude(dependency("commons-cli:commons-cli:1.2")) exclude(dependency("net.java.dev.jna:jna:.*")) exclude(dependency("org.objenesis:objenesis:.*")) exclude(dependency('com.fasterxml.jackson.core:.*:.*')) diff --git a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle index b41e0f8dcc1cf..4577935e4e08d 100644 --- a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle @@ -12,7 +12,8 @@ apply plugin: 'elasticsearch.internal-available-ports' dependencies { clusterPlugins project(':plugins:repository-hdfs') javaRestTestImplementation(testArtifact(project(xpackModule('searchable-snapshots')))) - javaRestTestImplementation project(path: ':test:fixtures:hdfs-fixture', configuration:"shadowedHdfs2") + javaRestTestCompileOnly project(path: ':test:fixtures:hdfs-fixture') + javaRestTestRuntimeOnly project(path: ':test:fixtures:hdfs-fixture', configuration:"shadowedHdfs2") javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') javaRestTestRuntimeOnly "com.google.guava:guava:16.0.1" javaRestTestRuntimeOnly "commons-cli:commons-cli:1.2" diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle index 81eb82a522389..d4615260952d1 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle @@ -10,7 +10,8 @@ apply plugin: 'elasticsearch.rest-resources' 
dependencies { javaRestTestImplementation testArtifact(project(xpackModule('snapshot-repo-test-kit'))) - javaRestTestImplementation project(path: ':test:fixtures:hdfs-fixture', configuration:"shadow") + javaRestTestCompileOnly project(path: ':test:fixtures:hdfs-fixture') + javaRestTestRuntimeOnly project(path: ':test:fixtures:hdfs-fixture', configuration:"shadow") javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') javaRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" javaRestTestImplementation "org.slf4j:slf4j-simple:${versions.slf4j}" From 3a15d0b0840c911654d7affb779fc3c814e0b646 Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Wed, 18 Dec 2024 12:45:40 -0600 Subject: [PATCH 07/62] Mute test --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a06334146ed7b..81480e89d1e8b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,6 +293,8 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118955 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118970 +- class: org.elasticsearch.xpack.migrate.action.ReindexDatastreamIndexTransportActionIT + issue: https://github.com/elastic/elasticsearch/issues/119002 # Examples: # From fab3bff84fb3648cee59e1f5ac8a21dbb1532a66 Mon Sep 17 00:00:00 2001 From: Blake Niemyjski Date: Wed, 18 Dec 2024 12:47:45 -0600 Subject: [PATCH 08/62] Update jvm.options (#118716) --- distribution/src/config/jvm.options | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index f55d90933ed61..94fc6f2cb9025 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -9,7 +9,7 @@ ## should create one or more files in the jvm.options.d ## directory containing your adjustments. ## -## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/jvm-options.html +## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/advanced-configuration.html#set-jvm-options ## for more information. 
## ################################################################ From 65faabd08d79043d10a0351e57f1b0c6239862da Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Wed, 18 Dec 2024 11:50:18 -0700 Subject: [PATCH 09/62] Refactor pausable field plugin to have common codebase (#118909) --- .../action/AbstractPausableIntegTestCase.java | 62 +------------ .../esql/action/AbstractPauseFieldPlugin.java | 86 +++++++++++++++++++ .../esql/action/CrossClusterAsyncQueryIT.java | 72 +--------------- .../action/CrossClustersCancellationIT.java | 80 ++--------------- .../esql/action/SimplePauseFieldPlugin.java | 36 ++++++++ 5 files changed, 136 insertions(+), 200 deletions(-) create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java index 8de65847c3f85..8054b260f0060 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java @@ -10,26 +10,15 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.engine.SegmentsStats; -import org.elasticsearch.index.mapper.OnScriptError; -import org.elasticsearch.logging.LogManager; -import org.elasticsearch.logging.Logger; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; import java.io.IOException; import java.util.Collection; -import java.util.Map; -import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -40,8 +29,6 @@ */ public abstract class AbstractPausableIntegTestCase extends AbstractEsqlIntegTestCase { - private static final Logger LOGGER = LogManager.getLogger(AbstractPausableIntegTestCase.class); - protected static final Semaphore scriptPermits = new Semaphore(0); protected int pageSize = -1; @@ -108,53 +95,10 @@ public void setupIndex() throws IOException { } } - public static class PausableFieldPlugin extends Plugin implements ScriptPlugin { - + public static class PausableFieldPlugin extends AbstractPauseFieldPlugin { @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String 
fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - try { - assertTrue(scriptPermits.tryAcquire(1, TimeUnit.MINUTES)); - } catch (Exception e) { - throw new AssertionError(e); - } - LOGGER.debug("--> emitting value"); - emit(1); - } - }; - } - }; - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; + protected boolean onWait() throws InterruptedException { + return scriptPermits.tryAcquire(1, TimeUnit.MINUTES); } } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java new file mode 100644 index 0000000000000..5554f7e571dfb --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.OnScriptError; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.LongFieldScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.lookup.SearchLookup; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import static org.junit.Assert.assertTrue; + +/** + * A plugin that provides a script language "pause" that can be used to simulate slow running queries. + * See also {@link AbstractPausableIntegTestCase}. + */ +public abstract class AbstractPauseFieldPlugin extends Plugin implements ScriptPlugin { + + // Called when the engine enters the execute() method. + protected void onStartExecute() {} + + // Called when the engine needs to wait for further execution to be allowed. 
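+    // Implementations return whether the wait completed; the pause engine wraps each call in
+    // assertTrue(onWait()), so a false return (for example a timed-out latch) fails the test
+    // instead of hanging it.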
+ protected abstract boolean onWait() throws InterruptedException; + + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { + return new ScriptEngine() { + @Override + public String getType() { + return "pause"; + } + + @Override + @SuppressWarnings("unchecked") + public FactoryType compile( + String name, + String code, + ScriptContext context, + Map params + ) { + if (context == LongFieldScript.CONTEXT) { + return (FactoryType) new LongFieldScript.Factory() { + @Override + public LongFieldScript.LeafFactory newFactory( + String fieldName, + Map params, + SearchLookup searchLookup, + OnScriptError onScriptError + ) { + return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { + @Override + public void execute() { + onStartExecute(); + try { + assertTrue(onWait()); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + emit(1); + } + }; + } + }; + } + throw new IllegalStateException("unsupported type " + context); + } + + @Override + public Set> getSupportedContexts() { + return Set.of(LongFieldScript.CONTEXT); + } + }; + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java index a2bba19db50fc..3926ea4c27a3d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java @@ -19,14 +19,8 @@ import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.transport.RemoteClusterAware; @@ -44,7 +38,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -80,7 +73,7 @@ protected Collection> nodePlugins(String clusterAlias) { plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class); // allows the async_search DELETE action plugins.add(InternalExchangePlugin.class); - plugins.add(PauseFieldPlugin.class); + plugins.add(SimplePauseFieldPlugin.class); return plugins; } @@ -99,64 +92,7 @@ public List> getSettings() { @Before public void resetPlugin() { - PauseFieldPlugin.allowEmitting = new CountDownLatch(1); - PauseFieldPlugin.startEmitting = new CountDownLatch(1); - } - - public static class PauseFieldPlugin extends Plugin implements ScriptPlugin { - public static CountDownLatch startEmitting = new CountDownLatch(1); - public static CountDownLatch allowEmitting = new CountDownLatch(1); - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - - public 
String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - if (context == LongFieldScript.CONTEXT) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - startEmitting.countDown(); - try { - assertTrue(allowEmitting.await(30, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - emit(1); - } - }; - } - }; - } - throw new IllegalStateException("unsupported type " + context); - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } + SimplePauseFieldPlugin.resetPlugin(); } /** @@ -184,7 +120,7 @@ public void testSuccessfulPathways() throws Exception { } // wait until we know that the query against 'remote-b:blocking' has started - PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS); + SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS); // wait until the query of 'cluster-a:logs-*' has finished (it is not blocked since we are not searching the 'blocking' index on it) assertBusy(() -> { @@ -234,7 +170,7 @@ public void testSuccessfulPathways() throws Exception { } // allow remoteB query to proceed - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); // wait until both remoteB and local queries have finished assertBusy(() -> { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index 17f5f81486651..cfe6fdeccb190 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -15,18 +15,11 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.compute.operator.DriverTaskRunner; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.transport.TransportService; @@ -38,9 +31,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; @@ -63,7 +53,7 @@ protected 
Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(InternalExchangePlugin.class); - plugins.add(PauseFieldPlugin.class); + plugins.add(SimplePauseFieldPlugin.class); return plugins; } @@ -82,63 +72,7 @@ public List> getSettings() { @Before public void resetPlugin() { - PauseFieldPlugin.allowEmitting = new CountDownLatch(1); - PauseFieldPlugin.startEmitting = new CountDownLatch(1); - } - - public static class PauseFieldPlugin extends Plugin implements ScriptPlugin { - public static CountDownLatch startEmitting = new CountDownLatch(1); - public static CountDownLatch allowEmitting = new CountDownLatch(1); - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - if (context == LongFieldScript.CONTEXT) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - startEmitting.countDown(); - try { - assertTrue(allowEmitting.await(30, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - emit(1); - } - }; - } - }; - } - throw new IllegalStateException("unsupported type " + context); - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } + SimplePauseFieldPlugin.resetPlugin(); } private void createRemoteIndex(int numDocs) throws Exception { @@ -169,7 +103,7 @@ public void testCancel() throws Exception { request.pragmas(randomPragmas()); PlainActionFuture requestFuture = new PlainActionFuture<>(); client().execute(EsqlQueryAction.INSTANCE, request, requestFuture); - assertTrue(PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); + assertTrue(SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); List rootTasks = new ArrayList<>(); assertBusy(() -> { List tasks = client().admin().cluster().prepareListTasks().setActions(EsqlQueryAction.NAME).get().getTasks(); @@ -192,7 +126,7 @@ public void testCancel() throws Exception { } }); } finally { - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); } Exception error = expectThrows(Exception.class, requestFuture::actionGet); assertThat(error.getMessage(), containsString("proxy timeout")); @@ -223,7 +157,7 @@ public void testSameRemoteClusters() throws Exception { assertThat(tasks, hasSize(moreClusters + 1)); }); } finally { - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); } try (EsqlQueryResponse resp = future.actionGet(30, TimeUnit.SECONDS)) { // TODO: This produces incorrect results because data on the remote cluster is processed multiple times. 
@@ -244,7 +178,7 @@ public void testTasks() throws Exception { request.query("FROM *:test | STATS total=sum(const) | LIMIT 1"); request.pragmas(randomPragmas()); ActionFuture requestFuture = client().execute(EsqlQueryAction.INSTANCE, request); - assertTrue(PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); + assertTrue(SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); try { assertBusy(() -> { List clusterTasks = client(REMOTE_CLUSTER).admin() @@ -270,7 +204,7 @@ public void testTasks() throws Exception { \\_ExchangeSinkOperator""")); }); } finally { - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); } requestFuture.actionGet(30, TimeUnit.SECONDS).close(); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java new file mode 100644 index 0000000000000..3ba73dd9a402e --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import java.util.concurrent.CountDownLatch; + +import java.util.concurrent.TimeUnit; + +/** + * A plugin that provides a script language "pause" that can be used to simulate slow running queries. + * This implementation lets callers observe when a query arrives at execute() via startEmitting, and lets them allow + * the execution to proceed via allowEmitting.
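+ * <p>
+ * Typical flow, as used by the tests in this change: reset both latches in a @Before method via resetPlugin(),
+ * start the query, await startEmitting, inspect the running tasks while the query is paused, then call
+ * allowEmitting.countDown() to let the query finish.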
+ */ +public class SimplePauseFieldPlugin extends AbstractPauseFieldPlugin { + public static CountDownLatch startEmitting = new CountDownLatch(1); + public static CountDownLatch allowEmitting = new CountDownLatch(1); + + public static void resetPlugin() { + allowEmitting = new CountDownLatch(1); + startEmitting = new CountDownLatch(1); + } + + @Override + public void onStartExecute() { + startEmitting.countDown(); + } + + @Override + public boolean onWait() throws InterruptedException { + return allowEmitting.await(30, TimeUnit.SECONDS); + } +} From 9cc362b9667f0903b1f124968cfb817cb56c86aa Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 18 Dec 2024 14:08:51 -0500 Subject: [PATCH 10/62] Entitlements: More robust frame skipping (#118983) * More robust frame skipping * Cosmetic improvements for clarity * Explicit set of runtime classes * Pass entitlements runtime module to PolicyManager ctor * Use the term "entitlements module" and filter instead of dropWhile * [CI] Auto commit changes from spotless --------- Co-authored-by: elasticsearchmachine --- .../EntitlementInitialization.java | 3 +- .../runtime/policy/PolicyManager.java | 71 ++++++++++---- .../runtime/policy/PolicyManagerTests.java | 94 +++++++++++++++++-- 3 files changed, 140 insertions(+), 28 deletions(-) diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index 9118f67cdc145..8e4cddc4d63ee 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -53,6 +53,7 @@ public class EntitlementInitialization { private static final String POLICY_FILE_NAME = "entitlement-policy.yaml"; + private static final Module ENTITLEMENTS_MODULE = PolicyManager.class.getModule(); private static ElasticsearchEntitlementChecker manager; @@ -92,7 +93,7 @@ private static PolicyManager createPolicyManager() throws IOException { "server", List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) ); - return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver()); + return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver(), ENTITLEMENTS_MODULE); } private static Map createPluginPolicies(Collection pluginData) throws IOException { diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 8d3efe4eb98e6..74ba986041dac 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -15,6 +15,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import java.lang.StackWalker.StackFrame; import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; import java.util.ArrayList; @@ -29,6 +30,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.lang.StackWalker.Option.RETAIN_CLASS_REFERENCE; +import static 
java.util.Objects.requireNonNull; +import static java.util.function.Predicate.not; + public class PolicyManager { private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class); @@ -63,6 +68,11 @@ public Stream getEntitlements(Class entitlementCla private static final Set systemModules = findSystemModules(); + /** + * Frames originating from this module are ignored in the permission logic. + */ + private final Module entitlementsModule; + private static Set findSystemModules() { var systemModulesDescriptors = ModuleFinder.ofSystem() .findAll() @@ -77,13 +87,18 @@ private static Set findSystemModules() { .collect(Collectors.toUnmodifiableSet()); } - public PolicyManager(Policy defaultPolicy, Map pluginPolicies, Function, String> pluginResolver) { - this.serverEntitlements = buildScopeEntitlementsMap(Objects.requireNonNull(defaultPolicy)); - this.pluginsEntitlements = Objects.requireNonNull(pluginPolicies) - .entrySet() + public PolicyManager( + Policy defaultPolicy, + Map pluginPolicies, + Function, String> pluginResolver, + Module entitlementsModule + ) { + this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(defaultPolicy)); + this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet() .stream() .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); this.pluginResolver = pluginResolver; + this.entitlementsModule = entitlementsModule; } private static Map> buildScopeEntitlementsMap(Policy policy) { @@ -185,7 +200,16 @@ private static boolean isServerModule(Module requestingModule) { return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot(); } - private static Module requestingModule(Class callerClass) { + /** + * Walks the stack to determine which module's entitlements should be checked. + * + * @param callerClass when non-null will be used if its module is suitable; + * this is a fast-path check that can avoid the stack walk + * in cases where the caller class is available. + * @return the requesting module, or {@code null} if the entire call stack + * comes from modules that are trusted. + */ + Module requestingModule(Class callerClass) { if (callerClass != null) { Module callerModule = callerClass.getModule(); if (systemModules.contains(callerModule) == false) { @@ -193,21 +217,34 @@ private static Module requestingModule(Class callerClass) { return callerModule; } } - int framesToSkip = 1 // getCallingClass (this method) - + 1 // the checkXxx method - + 1 // the runtime config method - + 1 // the instrumented method - ; - Optional module = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE) - .walk( - s -> s.skip(framesToSkip) - .map(f -> f.getDeclaringClass().getModule()) - .filter(m -> systemModules.contains(m) == false) - .findFirst() - ); + Optional module = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) + .walk(frames -> findRequestingModule(frames.map(StackFrame::getDeclaringClass))); return module.orElse(null); } + /** + * Given a stream of classes corresponding to the frames from a {@link StackWalker}, + * returns the module whose entitlements should be checked. 
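+ * <p>
+ * For example, given frame classes from [entitlements-module, system-module, plugin-module, ...], the
+ * entitlements and system frames are skipped and the plugin class's module is returned, mirroring the
+ * cases covered by PolicyManagerTests#testRequestingModuleWithStackWalk.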
+ * + * @throws NullPointerException if the requesting module is {@code null} + */ + Optional findRequestingModule(Stream> classes) { + return classes.map(Objects::requireNonNull) + .map(PolicyManager::moduleOf) + .filter(m -> m != entitlementsModule) // Ignore the entitlements library itself + .filter(not(systemModules::contains)) // Skip trusted JDK modules + .findFirst(); + } + + private static Module moduleOf(Class c) { + var result = c.getModule(); + if (result == null) { + throw new NullPointerException("Entitlements system does not support non-modular class [" + c.getName() + "]"); + } else { + return result; + } + } + private static boolean isTriviallyAllowed(Module requestingModule) { if (requestingModule == null) { logger.debug("Entitlement trivially allowed: entire call stack is in composed of classes in system modules"); diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java index 45bdf2e457824..0789fcc8dc770 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; import static java.util.Map.entry; import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED; @@ -37,11 +38,14 @@ @ESTestCase.WithoutSecurityManager public class PolicyManagerTests extends ESTestCase { + private static final Module NO_ENTITLEMENTS_MODULE = null; + public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.of("plugin1", createPluginPolicy("plugin.module")), - c -> "plugin1" + c -> "plugin1", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do @@ -62,7 +66,7 @@ public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { } public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); @@ -82,7 +86,7 @@ public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { } public void testGetEntitlementsFailureIsCached() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); @@ -103,7 +107,8 @@ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), - c -> "plugin2" + c -> "plugin2", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do @@ -115,7 +120,7 @@ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { } public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { - var policyManager = new 
PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null); + var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -138,7 +143,7 @@ public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotF } public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null); + var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -155,12 +160,13 @@ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws Class public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOException, ClassNotFoundException { final Path home = createTempDir(); - Path jar = creteMockPluginJar(home); + Path jar = createMockPluginJar(home); var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), - c -> "mock-plugin" + c -> "mock-plugin", + NO_ENTITLEMENTS_MODULE ); var layer = createLayerForJar(jar, "org.example.plugin"); @@ -179,7 +185,8 @@ public void testGetEntitlementsResultIsCached() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), - c -> "plugin2" + c -> "plugin2", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do @@ -197,6 +204,73 @@ public void testGetEntitlementsResultIsCached() { assertThat(entitlementsAgain, sameInstance(cachedResult)); } + public void testRequestingModuleFastPath() throws IOException, ClassNotFoundException { + var callerClass = makeClassInItsOwnModule(); + assertEquals(callerClass.getModule(), policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingModule(callerClass)); + } + + public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoundException { + var requestingClass = makeClassInItsOwnModule(); + var runtimeClass = makeClassInItsOwnModule(); // A class in the entitlements library itself + var ignorableClass = makeClassInItsOwnModule(); + var systemClass = Object.class; + + var policyManager = policyManagerWithEntitlementsModule(runtimeClass.getModule()); + + var requestingModule = requestingClass.getModule(); + + assertEquals( + "Skip one system frame", + requestingModule, + policyManager.findRequestingModule(Stream.of(systemClass, requestingClass, ignorableClass)).orElse(null) + ); + assertEquals( + "Skip multiple system frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(systemClass, systemClass, systemClass, requestingClass, ignorableClass)) + .orElse(null) + ); + assertEquals( + "Skip system frame between runtime frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(runtimeClass, systemClass, runtimeClass, requestingClass, ignorableClass)) + .orElse(null) + ); + assertEquals( + "Skip runtime frame between system frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(systemClass, runtimeClass, systemClass, requestingClass, ignorableClass)) + 
.orElse(null) + ); + assertEquals( + "No system frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(requestingClass, ignorableClass)).orElse(null) + ); + assertEquals( + "Skip runtime frames up to the first system frame", + requestingModule, + policyManager.findRequestingModule(Stream.of(runtimeClass, runtimeClass, systemClass, requestingClass, ignorableClass)) + .orElse(null) + ); + assertThrows( + "Non-modular caller frames are not supported", + NullPointerException.class, + () -> policyManager.findRequestingModule(Stream.of(systemClass, null)) + ); + } + + private static Class makeClassInItsOwnModule() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + Path jar = createMockPluginJar(home); + var layer = createLayerForJar(jar, "org.example.plugin"); + return layer.findLoader("org.example.plugin").loadClass("q.B"); + } + + private static PolicyManager policyManagerWithEntitlementsModule(Module entitlementsModule) { + return new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "test", entitlementsModule); + } + private static Policy createEmptyTestServerPolicy() { return new Policy("server", List.of()); } @@ -219,7 +293,7 @@ private static Policy createPluginPolicy(String... pluginModules) { ); } - private static Path creteMockPluginJar(Path home) throws IOException { + private static Path createMockPluginJar(Path home) throws IOException { Path jar = home.resolve("mock-plugin.jar"); Map sources = Map.ofEntries( From 7d301185bf1a650db09bb87033be70141353a5a5 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 18 Dec 2024 20:40:11 +0100 Subject: [PATCH 11/62] Don't throw VerificationException on illegal state (#118826) If we end up here, we need to know this - and we won't know it if we return a 400 to the user. This should be a 500. --- .../elasticsearch/xpack/esql/analysis/Analyzer.java | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index ecd0821c626bf..3d1bfdfd0ef42 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -691,15 +691,9 @@ private List resolveUsingColumns(List cols, List Date: Wed, 18 Dec 2024 19:47:12 +0000 Subject: [PATCH 12/62] Add a generic `rescorer` retriever based on the search request's rescore functionality (#118585) This pull request introduces a new retriever called `rescorer`, which leverages the `rescore` functionality of the search request. The `rescorer` retriever re-scores only the top documents retrieved by its child retriever, offering fine-tuned scoring capabilities. All rescorers supported in the `rescore` section of a search request are available in this retriever, and the same format is used to define the rescore configuration.
Example:
```yaml
- do:
    search:
      index: test
      body:
        retriever:
          rescorer:
            rescore:
              window_size: 10
              query:
                rescore_query:
                  rank_feature:
                    field: "features.second_stage"
                    linear: { }
                query_weight: 0
            retriever:
              standard:
                query:
                  rank_feature:
                    field: "features.first_stage"
                    linear: { }
        size: 2
```
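For readers wiring this up from Java instead of the REST layer, here is a minimal sketch using the classes this PR introduces. The match-all rescore query is a placeholder (the `rank_feature` queries from the YAML above live in a separate module), and the child retriever is supplied by the caller:

```java
import java.util.List;

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.retriever.RescorerRetrieverBuilder;
import org.elasticsearch.search.retriever.RetrieverBuilder;

// Sketch only: wraps any child retriever so that its top `window_size` documents are re-scored.
// With query_weight = 0, the first-pass scores are fully replaced by the rescore query's scores,
// as in the YAML example above.
static SearchSourceBuilder rescoredSource(RetrieverBuilder child) {
    QueryRescorerBuilder rescorer = new QueryRescorerBuilder(QueryBuilders.matchAllQuery()); // placeholder rescore query
    rescorer.windowSize(10);
    rescorer.setQueryWeight(0f);
    return new SearchSourceBuilder()
        .retriever(new RescorerRetrieverBuilder(child, List.of(rescorer)))
        .size(2);
}
```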
Closes #118327 Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- docs/changelog/118585.yaml | 7 + docs/reference/search/retriever.asciidoc | 121 +++++- rest-api-spec/build.gradle | 1 + .../30_rescorer_retriever.yml | 225 ++++++++++ .../test/search/90_search_after.yml | 25 -- .../search/functionscore/QueryRescorerIT.java | 23 + .../search/DefaultSearchContext.java | 3 +- .../elasticsearch/search/SearchFeatures.java | 7 + .../elasticsearch/search/SearchModule.java | 2 + .../search/builder/SearchSourceBuilder.java | 14 - .../query/QueryPhaseCollectorManager.java | 3 +- .../search/rescore/RescorePhase.java | 102 ++++- .../search/rescore/RescorerBuilder.java | 2 +- .../retriever/CompoundRetrieverBuilder.java | 43 +- .../retriever/RescorerRetrieverBuilder.java | 173 ++++++++ .../search/retriever/RetrieverBuilder.java | 2 +- .../search/DefaultSearchContextTests.java | 6 +- .../RescorerRetrieverBuilderParsingTests.java | 78 ++++ .../retriever/QueryRuleRetrieverBuilder.java | 1 - .../TextSimilarityRankRetrieverBuilder.java | 1 - x-pack/plugin/rank-rrf/build.gradle | 1 + .../xpack/rank/rrf/RRFRetrieverBuilder.java | 1 - .../rrf/RRFRankClientYamlTestSuiteIT.java | 1 + .../test/rrf/900_rrf_with_rescorer.yml | 409 ++++++++++++++++++ 24 files changed, 1180 insertions(+), 71 deletions(-) create mode 100644 docs/changelog/118585.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java create mode 100644 x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/900_rrf_with_rescorer.yml diff --git a/docs/changelog/118585.yaml b/docs/changelog/118585.yaml new file mode 100644 index 0000000000000..4caa5efabbd33 --- /dev/null +++ b/docs/changelog/118585.yaml @@ -0,0 +1,7 @@ +pr: 118585 +summary: Add a generic `rescorer` retriever based on the search request's rescore + functionality +area: Ranking +type: feature +issues: + - 118327 diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index f20e9148bf5e7..c7df40ff5e070 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -22,6 +22,9 @@ A <> that replaces the functionality of a traditi `knn`:: A <> that replaces the functionality of a <>. +`rescorer`:: +A <> that replaces the functionality of the <>. + `rrf`:: A <> that produces top documents from <>. @@ -371,6 +374,122 @@ GET movies/_search ---- // TEST[skip:uses ELSER] +[[rescorer-retriever]] +==== Rescorer Retriever + +The `rescorer` retriever re-scores only the results produced by its child retriever. +For the `standard` and `knn` retrievers, the `window_size` parameter specifies the number of documents examined per shard. + +For compound retrievers like `rrf`, the `window_size` parameter defines the total number of documents examined globally. + +When using the `rescorer`, an error is returned if the following conditions are not met: + +* The minimum configured rescore's `window_size` is: +** Greater than or equal to the `size` of the parent retriever for nested `rescorer` setups. +** Greater than or equal to the `size` of the search request when used as the primary retriever in the tree. 
+ +* And the maximum rescore's `window_size` is: +** Smaller than or equal to the `size` or `rank_window_size` of the child retriever. + +[discrete] +[[rescorer-retriever-parameters]] +===== Parameters + +`rescore`:: +(Required. <>) ++ +Defines the <> applied sequentially to the top documents returned by the child retriever. + +`retriever`:: +(Required. <>) ++ +Specifies the child retriever responsible for generating the initial set of top documents to be re-ranked. + +`filter`:: +(Optional. <>) ++ +Applies a <> to the retriever, ensuring that all documents match the filter criteria without affecting their scores. + +[discrete] +[[rescorer-retriever-example]] +==== Example + +The `rescorer` retriever can be placed at any level within the retriever tree. +The following example demonstrates a `rescorer` applied to the results produced by an `rrf` retriever: + +[source,console] +---- +GET movies/_search +{ + "size": 10, <1> + "retriever": { + "rescorer": { <2> + "rescore": { + "query": { <3> + "window_size": 50, <4> + "rescore_query": { + "script_score": { + "script": { + "source": "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + "params": { + "queryVector": [-0.5, 90.0, -10, 14.8, -156.0] + } + } + } + } + } + }, + "retriever": { <5> + "rrf": { + "rank_window_size": 100, <6> + "retrievers": [ + { + "standard": { + "query": { + "sparse_vector": { + "field": "plot_embedding", + "inference_id": "my-elser-model", + "query": "films that explore psychological depths" + } + } + } + }, + { + "standard": { + "query": { + "multi_match": { + "query": "crime", + "fields": [ + "plot", + "title" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [10, 22, 77], + "k": 10, + "num_candidates": 10 + } + } + ] + } + } + } + } +} +---- +// TEST[skip:uses ELSER] +<1> Specifies the number of top documents to return in the final response. +<2> A `rescorer` retriever applied as the final step. +<3> The definition of the `query` rescorer. +<4> Defines the number of documents to rescore from the child retriever. +<5> Specifies the child retriever definition. 
+<6> Defines the number of documents returned by the `rrf` retriever, which limits the available documents to rescore. + [[text-similarity-reranker-retriever]] ==== Text Similarity Re-ranker Retriever @@ -777,4 +896,4 @@ When a retriever is specified as part of a search, the following elements are no * <> * <> * <> -* <> +* <> use a <> instead diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index bdee32e596c4c..f23b5460f7d53 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -70,4 +70,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions") task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions") task.skipTest("synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set", "Can't work until auto-expand replicas is 0-1 for synonyms index") + task.skipTest("search/90_search_after/_shard_doc sort", "restriction has been lifted in latest versions") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml new file mode 100644 index 0000000000000..2c16de61c6b15 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml @@ -0,0 +1,225 @@ +setup: + - requires: + cluster_features: [ "search.retriever.rescorer.enabled" ] + reason: "Support for rescorer retriever" + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + available: + type: boolean + features: + type: rank_features + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {"_id": 1 }}' + - '{"features": { "first_stage": 1, "second_stage": 10}, "available": true, "group": 1}' + - '{"index": {"_id": 2 }}' + - '{"features": { "first_stage": 2, "second_stage": 9}, "available": false, "group": 1}' + - '{"index": {"_id": 3 }}' + - '{"features": { "first_stage": 3, "second_stage": 8}, "available": false, "group": 3}' + - '{"index": {"_id": 4 }}' + - '{"features": { "first_stage": 4, "second_stage": 7}, "available": true, "group": 1}' + - '{"index": {"_id": 5 }}' + - '{"features": { "first_stage": 5, "second_stage": 6}, "available": true, "group": 3}' + - '{"index": {"_id": 6 }}' + - '{"features": { "first_stage": 6, "second_stage": 5}, "available": false, "group": 2}' + - '{"index": {"_id": 7 }}' + - '{"features": { "first_stage": 7, "second_stage": 4}, "available": true, "group": 3}' + - '{"index": {"_id": 8 }}' + - '{"features": { "first_stage": 8, "second_stage": 3}, "available": true, "group": 1}' + - '{"index": {"_id": 9 }}' + - '{"features": { "first_stage": 9, "second_stage": 2}, "available": true, "group": 2}' + - '{"index": {"_id": 10 }}' + - '{"features": { "first_stage": 10, "second_stage": 1}, "available": false, "group": 1}' + +--- +"Rescorer retriever basic": + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 10 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 }
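+  # Second-stage scores invert the first-pass ranking: with query_weight 0, doc 1 (second_stage: 10) comes first.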
+ - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 9.0 } + + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 3 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: {} + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: {} + size: 2 + + - match: {hits.total.value: 10} + - match: {hits.hits.0._id: "8"} + - match: { hits.hits.0._score: 3.0 } + - match: {hits.hits.1._id: "9"} + - match: { hits.hits.1._score: 2.0 } + +--- +"Rescorer retriever with pre-filters": + - do: + search: + index: test + body: + retriever: + rescorer: + filter: + match: + available: true + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 6 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "4" } + - match: { hits.hits.1._score: 7.0 } + + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 4 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + filter: + match: + available: true + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 6 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.0._score: 6.0 } + - match: { hits.hits.1._id: "7" } + - match: { hits.hits.1._score: 4.0 } + +--- +"Rescorer retriever and collapsing": + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + collapse: + field: group + size: 3 + + - match: { hits.total.value: 10 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1._score: 8.0 } + - match: { hits.hits.2._id: "6" } + - match: { hits.hits.2._score: 5.0 } + +--- +"Rescorer retriever and invalid window size": + - do: + catch: "/\\[rescorer\\] requires \\[window_size: 5\\] be greater than or equal to \\[size: 10\\]/" + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 5 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 10 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml index 1fefc8bffffa1..d3b2b5a412717 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml @@ -218,31 +218,6 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 00:30:04.828740" } - match: {hits.hits.0.sort: [1571617804828740000] } - ---- -"_shard_doc sort": - - requires: - cluster_features: ["gte_v7.12.0"] - reason: _shard_doc sort was added in 7.12 - - - do: - indices.create: - index: test - - do: - index: - index: test - id: "1" - body: { 
id: 1, foo: bar, age: 18 } - - - do: - catch: /\[_shard_doc\] sort field cannot be used without \[point in time\]/ - search: - index: test - body: - size: 1 - sort: ["_shard_doc"] - search_after: [ 0L ] - --- "Format sort values": - requires: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index a7efb2fe0e68b..fbdcfe26d28ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.rescore.QueryRescoreMode; import org.elasticsearch.search.rescore.QueryRescorerBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.ParseField; @@ -840,6 +841,20 @@ public void testRescorePhaseWithInvalidSort() throws Exception { } } ); + + assertResponse( + prepareSearch().addSort(SortBuilders.scoreSort()) + .addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)) + .setTrackScores(true) + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50), + response -> { + assertThat(response.getHits().getTotalHits().value(), equalTo(5L)); + assertThat(response.getHits().getHits().length, equalTo(5)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(101f)); + } + } + ); } record GroupDoc(String id, String group, float firstPassScore, float secondPassScore, boolean shouldFilter) {} @@ -879,6 +894,10 @@ public void testRescoreAfterCollapse() throws Exception { .setQuery(fieldValueScoreQuery("firstPassScore")) .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore"))) .setCollapse(new CollapseBuilder("group")); + if (randomBoolean()) { + request.addSort(SortBuilders.scoreSort()); + request.addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + } assertResponse(request, resp -> { assertThat(resp.getHits().getTotalHits().value(), equalTo(5L)); assertThat(resp.getHits().getHits().length, equalTo(3)); @@ -958,6 +977,10 @@ public void testRescoreAfterCollapseRandom() throws Exception { .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore")).setQueryWeight(0f).windowSize(numGroups)) .setCollapse(new CollapseBuilder("group")) .setSize(Math.min(numGroups, 10)); + if (randomBoolean()) { + request.addSort(SortBuilders.scoreSort()); + request.addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + } long expectedNumHits = numHits; assertResponse(request, resp -> { assertThat(resp.getHits().getTotalHits().value(), equalTo(expectedNumHits)); diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index b87d097413b67..47d3ed337af73 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -73,6 +73,7 @@ import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; import org.elasticsearch.search.rank.feature.RankFeatureResult; import org.elasticsearch.search.rescore.RescoreContext; +import 
org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -377,7 +378,7 @@ public void preProcess() { ); } if (rescore != null) { - if (sort != null) { + if (RescorePhase.validateSort(sort) == false) { throw new IllegalArgumentException("Cannot use [sort] option in conjunction with [rescore]."); } int maxWindow = indexService.getIndexSettings().getMaxRescoreWindow(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java index beac39c2de304..553511346b182 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java +++ b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java @@ -23,4 +23,11 @@ public final class SearchFeatures implements FeatureSpecification { public Set getFeatures() { return Set.of(KnnVectorQueryBuilder.K_PARAM_SUPPORTED, LUCENE_10_0_0_UPGRADE); } + + public static final NodeFeature RETRIEVER_RESCORER_ENABLED = new NodeFeature("search.retriever.rescorer.enabled"); + + @Override + public Set getTestFeatures() { + return Set.of(RETRIEVER_RESCORER_ENABLED); + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index d282ba425b126..3294e1ba03f6b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -231,6 +231,7 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.rescore.RescorerBuilder; import org.elasticsearch.search.retriever.KnnRetrieverBuilder; +import org.elasticsearch.search.retriever.RescorerRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.StandardRetrieverBuilder; @@ -1080,6 +1081,7 @@ private void registerFetchSubPhase(FetchSubPhase subPhase) { private void registerRetrieverParsers(List plugins) { registerRetriever(new RetrieverSpec<>(StandardRetrieverBuilder.NAME, StandardRetrieverBuilder::fromXContent)); registerRetriever(new RetrieverSpec<>(KnnRetrieverBuilder.NAME, KnnRetrieverBuilder::fromXContent)); + registerRetriever(new RetrieverSpec<>(RescorerRetrieverBuilder.NAME, RescorerRetrieverBuilder::fromXContent)); registerFromPlugin(plugins, SearchPlugin::getRetrievers, this::registerRetriever); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 3554a6dc08b90..8c21abe4180ea 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -48,9 +48,7 @@ import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.slice.SliceBuilder; -import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; -import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -2341,18 +2339,6 @@ public 
ActionRequestValidationException validate( validationException = rescorer.validate(this, validationException); } } - - if (pointInTimeBuilder() == null && sorts() != null) { - for (var sortBuilder : sorts()) { - if (sortBuilder instanceof FieldSortBuilder fieldSortBuilder - && ShardDocSortField.NAME.equals(fieldSortBuilder.getFieldName())) { - validationException = addValidationError( - "[" + FieldSortBuilder.SHARD_DOC_FIELD_NAME + "] sort field cannot be used without [point in time]", - validationException - ); - } - } - } return validationException; } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index cbc04dd460ff5..3d793a164f40a 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -58,6 +58,7 @@ import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.InternalProfileCollector; import org.elasticsearch.search.rescore.RescoreContext; +import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.sort.SortAndFormats; import java.io.IOException; @@ -238,7 +239,7 @@ static CollectorManager createQueryPhaseCollectorMa int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; if (rescore) { - assert searchContext.sort() == null; + assert RescorePhase.validateSort(searchContext.sort()); for (RescoreContext rescoreContext : searchContext.rescore()) { numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 7e3646e7689cc..c23df9cdfa441 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; @@ -22,9 +23,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.search.sort.ShardDocSortField; +import org.elasticsearch.search.sort.SortAndFormats; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -39,15 +43,27 @@ public static void execute(SearchContext context) { if (context.size() == 0 || context.rescore() == null || context.rescore().isEmpty()) { return; } - + if (validateSort(context.sort()) == false) { + throw new IllegalStateException("Cannot use [sort] option in conjunction with [rescore], missing a validate?"); + } TopDocs topDocs = context.queryResult().topDocs().topDocs; if (topDocs.scoreDocs.length == 0) { return; } + // Populate FieldDoc#score using the primary sort field (_score) to ensure compatibility with top docs rescoring + Arrays.stream(topDocs.scoreDocs).forEach(t -> { + if (t instanceof FieldDoc 
fieldDoc) { + fieldDoc.score = (float) fieldDoc.fields[0]; + } + }); TopFieldGroups topGroups = null; + TopFieldDocs topFields = null; if (topDocs instanceof TopFieldGroups topFieldGroups) { - assert context.collapse() != null; + assert context.collapse() != null && validateSortFields(topFieldGroups.fields); topGroups = topFieldGroups; + } else if (topDocs instanceof TopFieldDocs topFieldDocs) { + assert validateSortFields(topFieldDocs.fields); + topFields = topFieldDocs; } try { Runnable cancellationCheck = getCancellationChecks(context); @@ -56,17 +72,18 @@ public static void execute(SearchContext context) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); // It is the responsibility of the rescorer to sort the resulted top docs, // here we only assert that this condition is met. - assert context.sort() == null && topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore"; + assert topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore"; ctx.setCancellationChecker(null); } + /** + * Since rescorers are building top docs with score only, we must reconstruct the {@link TopFieldGroups} + * or {@link TopFieldDocs} using their original version before rescoring. + */ if (topGroups != null) { assert context.collapse() != null; - /** - * Since rescorers don't preserve collapsing, we must reconstruct the group and field - * values from the originalTopGroups to create a new {@link TopFieldGroups} from the - * rescored top documents. - */ - topDocs = rewriteTopGroups(topGroups, topDocs); + topDocs = rewriteTopFieldGroups(topGroups, topDocs); + } else if (topFields != null) { + topDocs = rewriteTopFieldDocs(topFields, topDocs); } context.queryResult() .topDocs(new TopDocsAndMaxScore(topDocs, topDocs.scoreDocs[0].score), context.queryResult().sortValueFormats()); @@ -81,29 +98,84 @@ public static void execute(SearchContext context) { } } - private static TopFieldGroups rewriteTopGroups(TopFieldGroups originalTopGroups, TopDocs rescoredTopDocs) { - assert originalTopGroups.fields.length == 1 && SortField.FIELD_SCORE.equals(originalTopGroups.fields[0]) - : "rescore must always sort by score descending"; + /** + * Returns whether the provided {@link SortAndFormats} can be used to rescore + * top documents. + */ + public static boolean validateSort(SortAndFormats sortAndFormats) { + if (sortAndFormats == null) { + return true; + } + return validateSortFields(sortAndFormats.sort.getSort()); + } + + private static boolean validateSortFields(SortField[] fields) { + if (fields[0].equals(SortField.FIELD_SCORE) == false) { + return false; + } + if (fields.length == 1) { + return true; + } + + // The ShardDocSortField can be used as a tiebreaker because it maintains + // the natural document ID order within the shard. 
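+        // Rescoring therefore accepts a primary sort on _score, optionally tie-broken by an ascending _shard_doc.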
+ if (fields[1] instanceof ShardDocSortField == false || fields[1].getReverse()) { + return false; + } + return true; + } + + private static TopFieldDocs rewriteTopFieldDocs(TopFieldDocs originalTopFieldDocs, TopDocs rescoredTopDocs) { + Map docIdToFieldDoc = Maps.newMapWithExpectedSize(originalTopFieldDocs.scoreDocs.length); + for (int i = 0; i < originalTopFieldDocs.scoreDocs.length; i++) { + docIdToFieldDoc.put(originalTopFieldDocs.scoreDocs[i].doc, (FieldDoc) originalTopFieldDocs.scoreDocs[i]); + } + var newScoreDocs = new FieldDoc[rescoredTopDocs.scoreDocs.length]; + int pos = 0; + for (var doc : rescoredTopDocs.scoreDocs) { + newScoreDocs[pos] = docIdToFieldDoc.get(doc.doc); + newScoreDocs[pos].score = doc.score; + newScoreDocs[pos].fields[0] = newScoreDocs[pos].score; + pos++; + } + return new TopFieldDocs(originalTopFieldDocs.totalHits, newScoreDocs, originalTopFieldDocs.fields); + } + + private static TopFieldGroups rewriteTopFieldGroups(TopFieldGroups originalTopGroups, TopDocs rescoredTopDocs) { + var newFieldDocs = rewriteFieldDocs((FieldDoc[]) originalTopGroups.scoreDocs, rescoredTopDocs.scoreDocs); + Map docIdToGroupValue = Maps.newMapWithExpectedSize(originalTopGroups.scoreDocs.length); for (int i = 0; i < originalTopGroups.scoreDocs.length; i++) { docIdToGroupValue.put(originalTopGroups.scoreDocs[i].doc, originalTopGroups.groupValues[i]); } - var newScoreDocs = new FieldDoc[rescoredTopDocs.scoreDocs.length]; var newGroupValues = new Object[originalTopGroups.groupValues.length]; int pos = 0; for (var doc : rescoredTopDocs.scoreDocs) { - newScoreDocs[pos] = new FieldDoc(doc.doc, doc.score, new Object[] { doc.score }); newGroupValues[pos++] = docIdToGroupValue.get(doc.doc); } return new TopFieldGroups( originalTopGroups.field, originalTopGroups.totalHits, - newScoreDocs, + newFieldDocs, originalTopGroups.fields, newGroupValues ); } + private static FieldDoc[] rewriteFieldDocs(FieldDoc[] originalTopDocs, ScoreDoc[] rescoredTopDocs) { + Map docIdToFieldDoc = Maps.newMapWithExpectedSize(rescoredTopDocs.length); + Arrays.stream(originalTopDocs).forEach(d -> docIdToFieldDoc.put(d.doc, d)); + var newDocs = new FieldDoc[rescoredTopDocs.length]; + int pos = 0; + for (var doc : rescoredTopDocs) { + newDocs[pos] = docIdToFieldDoc.get(doc.doc); + newDocs[pos].score = doc.score; + newDocs[pos].fields[0] = doc.score; + pos++; + } + return newDocs; + } + /** * Returns true if the provided docs are sorted by score. */ diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java index f624961515389..38a319321207f 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java @@ -39,7 +39,7 @@ public abstract class RescorerBuilder> protected Integer windowSize; - private static final ParseField WINDOW_SIZE_FIELD = new ParseField("window_size"); + public static final ParseField WINDOW_SIZE_FIELD = new ParseField("window_size"); /** * Construct an empty RescoreBuilder. 
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index 2ab6395db73b5..298340e5c579e 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -32,10 +32,12 @@ import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.xcontent.ParseField; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -49,6 +51,8 @@ public abstract class CompoundRetrieverBuilder rankWindowSize) { validationException = addValidationError( - "[" - + this.getName() - + "] requires [rank_window_size: " - + rankWindowSize - + "]" - + " be greater than or equal to [size: " - + source.size() - + "]", + String.format( + Locale.ROOT, + "[%s] requires [%s: %d] be greater than or equal to [size: %d]", + getName(), + getRankWindowSizeField().getPreferredName(), + rankWindowSize, + source.size() + ), validationException ); } @@ -231,6 +243,21 @@ public ActionRequestValidationException validate( } for (RetrieverSource innerRetriever : innerRetrievers) { validationException = innerRetriever.retriever().validate(source, validationException, isScroll, allowPartialSearchResults); + if (innerRetriever.retriever() instanceof CompoundRetrieverBuilder compoundChild) { + if (rankWindowSize > compoundChild.rankWindowSize) { + String errorMessage = String.format( + Locale.ROOT, + "[%s] requires [%s: %d] to be smaller than or equal to its sub retriever's %s [%s: %d]", + this.getName(), + getRankWindowSizeField().getPreferredName(), + rankWindowSize, + compoundChild.getName(), + compoundChild.getRankWindowSizeField(), + compoundChild.rankWindowSize + ); + validationException = addValidationError(errorMessage, validationException); + } + } } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java new file mode 100644 index 0000000000000..09688b5b9b001 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.search.retriever; + +import org.apache.lucene.search.ScoreDoc; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.search.builder.SearchSourceBuilder.RESCORE_FIELD; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A {@link CompoundRetrieverBuilder} that re-scores only the results produced by its child retriever. + */ +public final class RescorerRetrieverBuilder extends CompoundRetrieverBuilder { + + public static final String NAME = "rescorer"; + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + args -> new RescorerRetrieverBuilder((RetrieverBuilder) args[0], (List>) args[1]) + ); + + static { + PARSER.declareNamedObject(constructorArg(), (parser, context, n) -> { + RetrieverBuilder innerRetriever = parser.namedObject(RetrieverBuilder.class, n, context); + context.trackRetrieverUsage(innerRetriever.getName()); + return innerRetriever; + }, RETRIEVER_FIELD); + PARSER.declareField(constructorArg(), (parser, context) -> { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List> rescorers = new ArrayList<>(); + while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { + rescorers.add(RescorerBuilder.parseFromXContent(parser, name -> context.trackRescorerUsage(name))); + } + return rescorers; + } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + return List.of(RescorerBuilder.parseFromXContent(parser, name -> context.trackRescorerUsage(name))); + } else { + throw new IllegalArgumentException( + "Unknown format for [rescorer.rescore], expects an object or an array of objects, got: " + parser.currentToken() + ); + } + }, RESCORE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); + RetrieverBuilder.declareBaseParserFields(NAME, PARSER); + } + + public static RescorerRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + try { + return PARSER.apply(parser, context); + } catch (Exception e) { + throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); + } + } + + private final List> rescorers; + + public RescorerRetrieverBuilder(RetrieverBuilder retriever, List> rescorers) { + super(List.of(new RetrieverSource(retriever, null)), extractMinWindowSize(rescorers)); + if (rescorers.isEmpty()) { + throw new IllegalArgumentException("Missing rescore definition"); + } + this.rescorers = rescorers; + } + + private RescorerRetrieverBuilder(RetrieverSource retriever, List> rescorers) { + super(List.of(retriever), extractMinWindowSize(rescorers)); + this.rescorers = rescorers; + } + + /** + * The minimum window size is used as the {@link CompoundRetrieverBuilder#rankWindowSize}, + * the final number of top documents to return in this retriever. 
+ */ + private static int extractMinWindowSize(List> rescorers) { + int windowSize = Integer.MAX_VALUE; + for (var rescore : rescorers) { + windowSize = Math.min(rescore.windowSize() == null ? RescorerBuilder.DEFAULT_WINDOW_SIZE : rescore.windowSize(), windowSize); + } + return windowSize; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public ParseField getRankWindowSizeField() { + return RescorerBuilder.WINDOW_SIZE_FIELD; + } + + @Override + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder source) { + /** + * The re-scorer is passed downstream because this query operates only on + * the top documents retrieved by the child retriever. + * + * - If the sub-retriever is a {@link CompoundRetrieverBuilder}, only the top + * documents are re-scored since they are already determined at this stage. + * - For other retrievers that do not require a rewrite, the re-scorer's window + * size is applied per shard. As a result, more documents are re-scored + * compared to the final top documents produced by these retrievers in isolation. + */ + for (var rescorer : rescorers) { + source.addRescorer(rescorer); + } + return source; + } + + @Override + public void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RETRIEVER_FIELD.getPreferredName(), innerRetrievers.getFirst().retriever()); + builder.startArray(RESCORE_FIELD.getPreferredName()); + for (RescorerBuilder rescorer : rescorers) { + rescorer.toXContent(builder, params); + } + builder.endArray(); + } + + @Override + protected RescorerRetrieverBuilder clone(List newChildRetrievers, List newPreFilterQueryBuilders) { + var newInstance = new RescorerRetrieverBuilder(newChildRetrievers.get(0), rescorers); + newInstance.preFilterQueryBuilders = newPreFilterQueryBuilders; + return newInstance; + } + + @Override + protected RankDoc[] combineInnerRetrieverResults(List rankResults) { + assert rankResults.size() == 1; + ScoreDoc[] scoreDocs = rankResults.getFirst(); + RankDoc[] rankDocs = new RankDoc[scoreDocs.length]; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + rankDocs[i] = new RankDoc(scoreDoc.doc, scoreDoc.score, scoreDoc.shardIndex); + rankDocs[i].rank = i + 1; + } + return rankDocs; + } + + @Override + public boolean doEquals(Object o) { + RescorerRetrieverBuilder that = (RescorerRetrieverBuilder) o; + return super.doEquals(o) && Objects.equals(rescorers, that.rescorers); + } + + @Override + public int doHashCode() { + return Objects.hash(super.doHashCode(), rescorers); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index d52c354cad69e..b9bfdfdf3402f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -63,7 +63,7 @@ protected static void declareBaseParserFields( AbstractObjectParser parser ) { parser.declareObjectArray( - (r, v) -> r.preFilterQueryBuilders = v, + (r, v) -> r.preFilterQueryBuilders = new ArrayList<>(v), (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p, c::trackQueryUsage), PRE_FILTER_FIELD ); diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index a474c1dc38c50..d3a3792f605db 100644 --- 
a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.store.BaseDirectoryWrapper; @@ -245,7 +246,10 @@ protected Engine.Searcher acquireSearcherInternal(String source) { // resultWindow not greater than maxResultWindow and both rescore and sort are not null context1.from(0); DocValueFormat docValueFormat = mock(DocValueFormat.class); - SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat }); + SortAndFormats sortAndFormats = new SortAndFormats( + new Sort(new SortField[] { SortField.FIELD_DOC }), + new DocValueFormat[] { docValueFormat } + ); context1.sort(sortAndFormats); RescoreContext rescoreContext = mock(RescoreContext.class); diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java new file mode 100644 index 0000000000000..fa83246d90cb2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.rescore.QueryRescorerBuilderTests; +import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.emptyList; + +public class RescorerRetrieverBuilderParsingTests extends AbstractXContentTestCase { + private static List xContentRegistryEntries; + + @BeforeClass + public static void init() { + xContentRegistryEntries = new SearchModule(Settings.EMPTY, emptyList()).getNamedXContents(); + } + + @AfterClass + public static void afterClass() throws Exception { + xContentRegistryEntries = null; + } + + @Override + protected RescorerRetrieverBuilder createTestInstance() { + int num = randomIntBetween(1, 3); + List> rescorers = new ArrayList<>(); + for (int i = 0; i < num; i++) { + rescorers.add(QueryRescorerBuilderTests.randomRescoreBuilder()); + } + return new RescorerRetrieverBuilder(TestRetrieverBuilder.createRandomTestRetrieverBuilder(), rescorers); + } + + @Override + protected RescorerRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { + return (RescorerRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder( + parser, + new RetrieverParserContext(new SearchUsage(), n -> true) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(xContentRegistryEntries); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + return new NamedXContentRegistry(entries); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java index 5b27cc7a3e05a..3a53ed977318d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -50,7 +50,6 @@ public final class QueryRuleRetrieverBuilder extends CompoundRetrieverBuilder PARSER = new ConstructingObjectParser<>( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index fd2427dc8ac6a..46bebebff9c95 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -47,7 +47,6 @@ public class TextSimilarityRankRetrieverBuilder extends CompoundRetrieverBuilder<TextSimilarityRankRetrieverBuilder> public static final ParseField INFERENCE_ID_FIELD = new ParseField("inference_id"); public static final ParseField INFERENCE_TEXT_FIELD = new ParseField("inference_text"); public static final ParseField FIELD_FIELD = new ParseField("field"); - public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); public static final ConstructingObjectParser<TextSimilarityRankRetrieverBuilder, RetrieverParserContext> PARSER = new ConstructingObjectParser<>(TextSimilarityRankBuilder.NAME, args -> { diff --git a/x-pack/plugin/rank-rrf/build.gradle b/x-pack/plugin/rank-rrf/build.gradle index 2c3f217243aa4..b2d470c6618ea 100644 --- a/x-pack/plugin/rank-rrf/build.gradle +++ b/x-pack/plugin/rank-rrf/build.gradle @@ -22,6 +22,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) testImplementation(testArtifact(project(':server'))) + clusterModules project(':modules:mapper-extras') clusterModules project(xpackModule('rank-rrf')) clusterModules project(xpackModule('inference')) clusterModules project(':modules:lang-painless') diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index f1171b74f7468..c1447623dd5b1 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -48,7 +48,6 @@ public final class RRFRetrieverBuilder extends CompoundRetrieverBuilder<RRFRetrieverBuilder> Date: Wed, 18 Dec 2024 14:31:17 -0600 Subject: [PATCH 13/62] block-writes cannot be added after read-only (#119007) Fix a bug in ReindexDataStreamIndexAction. If the source index both has a write block and is read-only, both blocks must be applied to the destination index. If the read-only block is set first, the write block cannot be added, because the settings of a read-only index can no longer be modified.
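For illustration, here is a minimal, self-contained sketch of the ordering constraint; every class, method, and block name below is invented for the example and is not the transport action's real API. The point it demonstrates: once a read-only block is in place, the index settings are frozen, so the write block has to be added first.

    import java.util.LinkedHashSet;
    import java.util.Set;

    // Toy model of the destination index's settings (all names hypothetical).
    public class BlockOrderingSketch {
        private final Set<String> blocks = new LinkedHashSet<>();

        void addBlock(String block) {
            // Mirrors the cluster rejecting settings updates on a read-only index.
            if (blocks.contains("read_only")) {
                throw new IllegalStateException("cannot add [" + block + "]: index settings are read-only");
            }
            blocks.add(block);
        }

        public static void main(String[] args) {
            BlockOrderingSketch broken = new BlockOrderingSketch();
            broken.addBlock("read_only");
            try {
                broken.addBlock("write"); // the old order: unreachable once read_only is set
            } catch (IllegalStateException e) {
                System.out.println("old order fails: " + e.getMessage());
            }

            BlockOrderingSketch fixed = new BlockOrderingSketch();
            fixed.addBlock("write");     // add the write block while settings are still mutable
            fixed.addBlock("read_only"); // then freeze the index
            System.out.println("fixed order applied: " + fixed.blocks);
        }
    }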
--- docs/changelog/119007.yaml | 6 ++++++ muted-tests.yml | 2 -- .../action/ReindexDataStreamIndexTransportAction.java | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/119007.yaml diff --git a/docs/changelog/119007.yaml b/docs/changelog/119007.yaml new file mode 100644 index 0000000000000..458101b68d454 --- /dev/null +++ b/docs/changelog/119007.yaml @@ -0,0 +1,6 @@ +pr: 119007 +summary: Block-writes cannot be added after read-only +area: Data streams +type: bug +issues: + - 119002 diff --git a/muted-tests.yml b/muted-tests.yml index 81480e89d1e8b..a06334146ed7b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,8 +293,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118955 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118970 -- class: org.elasticsearch.xpack.migrate.action.ReindexDatastreamIndexTransportActionIT - issue: https://github.com/elastic/elasticsearch/issues/119002 # Examples: # diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index 165fd61ae6599..66b13a9ce22b0 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -92,8 +92,8 @@ protected void doExecute( .andThen(l -> deleteDestIfExists(destIndexName, l)) .andThen(l -> createIndex(sourceIndex, destIndexName, l)) .andThen(l -> reindex(sourceIndexName, destIndexName, l)) - .andThen(l -> addBlockIfFromSource(READ_ONLY, settingsBefore, destIndexName, l)) .andThen(l -> addBlockIfFromSource(WRITE, settingsBefore, destIndexName, l)) + .andThen(l -> addBlockIfFromSource(READ_ONLY, settingsBefore, destIndexName, l)) .andThenApply(ignored -> new ReindexDataStreamIndexAction.Response(destIndexName)) .addListener(listener); } From 6e2c614af34175f55b25ece83f90cffe0e96542c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 18 Dec 2024 13:01:55 -0800 Subject: [PATCH 14/62] Use minimum java version for javadoc tool (#118908) When compiling, we use a compiler for the minimum Java version. However, javadoc is left to whichever Java version Gradle itself runs on. This commit adjusts javadoc to also use a javadoc tool for the minimum Java version.
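As a rough standalone sketch of the pattern this commit applies (the plugin class name and the hard-coded version 17 are assumptions for the example; the actual change reads the version from the build parameters):

    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import org.gradle.api.tasks.javadoc.Javadoc;
    import org.gradle.jvm.toolchain.JavaLanguageVersion;
    import org.gradle.jvm.toolchain.JavaToolchainService;

    import javax.inject.Inject;

    // Hypothetical plugin: pins every Javadoc task to a toolchain-provided
    // javadoc tool instead of whichever JDK Gradle happens to run on.
    public class PinnedJavadocPlugin implements Plugin<Project> {
        private final JavaToolchainService javaToolchains;

        @Inject
        public PinnedJavadocPlugin(JavaToolchainService javaToolchains) {
            this.javaToolchains = javaToolchains;
        }

        @Override
        public void apply(Project project) {
            project.getTasks().withType(Javadoc.class).configureEach(javadoc ->
                javadoc.getJavadocTool().set(javaToolchains.javadocToolFor(spec ->
                    spec.getLanguageVersion().set(JavaLanguageVersion.of(17)))));
        }
    }

Resolving the tool through JavaToolchainService means Gradle locates (or provisions) a JDK matching the requested language version rather than falling back to its own runtime.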
--- .../internal/ElasticsearchJavaPlugin.java | 20 +++++++++++++++++-- .../gradle/internal/MrjarPlugin.java | 14 ++++++++++--- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java index e62c26c7fbc01..3ab85ba69dc80 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java @@ -31,11 +31,15 @@ import org.gradle.api.tasks.bundling.Jar; import org.gradle.api.tasks.javadoc.Javadoc; import org.gradle.external.javadoc.CoreJavadocOptions; +import org.gradle.jvm.toolchain.JavaLanguageVersion; +import org.gradle.jvm.toolchain.JavaToolchainService; import org.gradle.language.base.plugins.LifecycleBasePlugin; import java.io.File; import java.util.Map; +import javax.inject.Inject; + import static org.elasticsearch.gradle.internal.conventions.util.Util.toStringable; import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; @@ -44,6 +48,14 @@ * common configuration for production code. */ public class ElasticsearchJavaPlugin implements Plugin { + + private final JavaToolchainService javaToolchains; + + @Inject + ElasticsearchJavaPlugin(JavaToolchainService javaToolchains) { + this.javaToolchains = javaToolchains; + } + @Override public void apply(Project project) { project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); @@ -55,7 +67,7 @@ public void apply(Project project) { // configureConfigurations(project); configureJars(project, buildParams.get()); configureJarManifest(project, buildParams.get()); - configureJavadoc(project); + configureJavadoc(project, buildParams.get()); testCompileOnlyDeps(project); } @@ -128,7 +140,7 @@ private static void configureJarManifest(Project project, BuildParameterExtensio project.getPluginManager().apply("nebula.info-jar"); } - private static void configureJavadoc(Project project) { + private void configureJavadoc(Project project, BuildParameterExtension buildParams) { project.getTasks().withType(Javadoc.class).configureEach(javadoc -> { /* * Generate docs using html5 to suppress a warning from `javadoc` @@ -136,6 +148,10 @@ private static void configureJavadoc(Project project) { */ CoreJavadocOptions javadocOptions = (CoreJavadocOptions) javadoc.getOptions(); javadocOptions.addBooleanOption("html5", true); + + javadoc.getJavadocTool().set(javaToolchains.javadocToolFor(spec -> { + spec.getLanguageVersion().set(JavaLanguageVersion.of(buildParams.getMinimumRuntimeVersion().getMajorVersion())); + })); }); TaskProvider javadoc = project.getTasks().withType(Javadoc.class).named("javadoc"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index 7c488e6e73fee..5402e0a04fe8f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -86,14 +86,14 @@ public void apply(Project project) { configurePreviewFeatures(project, javaExtension.getSourceSets().getByName(SourceSet.TEST_SOURCE_SET_NAME), 21); for (int javaVersion : mainVersions) { String mainSourceSetName = SourceSet.MAIN_SOURCE_SET_NAME + javaVersion; - 
SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion); + SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion, true); configureSourceSetInJar(project, mainSourceSet, javaVersion); addJar(project, mainSourceSet, javaVersion); mainSourceSets.add(mainSourceSetName); testSourceSets.add(mainSourceSetName); String testSourceSetName = SourceSet.TEST_SOURCE_SET_NAME + javaVersion; - SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion); + SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion, false); testSourceSets.add(testSourceSetName); createTestTask(project, buildParams, testSourceSet, javaVersion, mainSourceSets); } @@ -121,7 +121,8 @@ private SourceSet addSourceSet( JavaPluginExtension javaExtension, String sourceSetName, List parentSourceSets, - int javaVersion + int javaVersion, + boolean isMainSourceSet ) { SourceSet sourceSet = javaExtension.getSourceSets().maybeCreate(sourceSetName); for (String parentSourceSetName : parentSourceSets) { @@ -135,6 +136,13 @@ private SourceSet addSourceSet( CompileOptions compileOptions = compileTask.getOptions(); compileOptions.getRelease().set(javaVersion); }); + if (isMainSourceSet) { + project.getTasks().create(sourceSet.getJavadocTaskName(), Javadoc.class, javadocTask -> { + javadocTask.getJavadocTool().set(javaToolchains.javadocToolFor(spec -> { + spec.getLanguageVersion().set(JavaLanguageVersion.of(javaVersion)); + })); + }); + } configurePreviewFeatures(project, sourceSet, javaVersion); // Since we configure MRJAR sourcesets to allow preview apis, class signatures for those From 1f4fef13f49918afcc53593c99ec41ebab6b2de0 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Dec 2024 13:12:17 -0800 Subject: [PATCH 15/62] Improve efficiency of incremental builds when building bwc distributions (#118713) --- .../InternalDistributionBwcSetupPlugin.java | 33 ++++++++++++++----- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index da26cb66122ad..0e8dbb7fce26c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -17,12 +17,12 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; +import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.ProjectLayout; import org.gradle.api.model.ObjectFactory; import org.gradle.api.plugins.JvmToolchainsPlugin; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; -import org.gradle.api.tasks.Copy; import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskProvider; import org.gradle.jvm.toolchain.JavaToolchainService; @@ -54,11 +54,17 @@ public class InternalDistributionBwcSetupPlugin implements Plugin { private final ObjectFactory objectFactory; private ProviderFactory providerFactory; private JavaToolchainService toolChainService; + private FileSystemOperations fileSystemOperations; @Inject - public InternalDistributionBwcSetupPlugin(ObjectFactory objectFactory, 
ProviderFactory providerFactory) { + public InternalDistributionBwcSetupPlugin( + ObjectFactory objectFactory, + ProviderFactory providerFactory, + FileSystemOperations fileSystemOperations + ) { this.objectFactory = objectFactory; this.providerFactory = providerFactory; + this.fileSystemOperations = fileSystemOperations; } @Override @@ -76,7 +82,8 @@ public void apply(Project project) { providerFactory, objectFactory, toolChainService, - isCi + isCi, + fileSystemOperations ); }); } @@ -88,7 +95,8 @@ private static void configureBwcProject( ProviderFactory providerFactory, ObjectFactory objectFactory, JavaToolchainService toolChainService, - Boolean isCi + Boolean isCi, + FileSystemOperations fileSystemOperations ) { ProjectLayout layout = project.getLayout(); Provider versionInfoProvider = providerFactory.provider(() -> versionInfo); @@ -120,11 +128,18 @@ private static void configureBwcProject( List distributionProjects = resolveArchiveProjects(checkoutDir.get(), bwcVersion.get()); // Setup gradle user home directory - project.getTasks().register("setupGradleUserHome", Copy.class, copy -> { - copy.into(project.getGradle().getGradleUserHomeDir().getAbsolutePath() + "-" + project.getName()); - copy.from(project.getGradle().getGradleUserHomeDir().getAbsolutePath(), copySpec -> { - copySpec.include("gradle.properties"); - copySpec.include("init.d/*"); + // We don't use a normal `Copy` task here as snapshotting the entire gradle user home is very expensive. This task is cheap, so + // up-to-date checking doesn't buy us much + project.getTasks().register("setupGradleUserHome", task -> { + task.doLast(t -> { + fileSystemOperations.copy(copy -> { + String gradleUserHome = project.getGradle().getGradleUserHomeDir().getAbsolutePath(); + copy.into(gradleUserHome + "-" + project.getName()); + copy.from(gradleUserHome, copySpec -> { + copySpec.include("gradle.properties"); + copySpec.include("init.d/*"); + }); + }); }); }); From 24773e0ba619e8f99293521b389a7266a0c8214d Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 19 Dec 2024 08:23:51 +1100 Subject: [PATCH 16/62] Mute org.elasticsearch.xpack.security.authc.AuthenticationServiceTests testInvalidToken #119019 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a06334146ed7b..2d215bfb04c57 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,6 +293,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118955 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118970 +- class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests + method: testInvalidToken + issue: https://github.com/elastic/elasticsearch/issues/119019 # Examples: # From cc69e06974a8b9b543962e7fe98e1b954e89a268 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 18 Dec 2024 22:35:55 +0100 Subject: [PATCH 17/62] Re-add support for some metadata field parameters (#118825) We removed support for type, fields, copy_to and boost in metadata field definitions with #116944 but with the move towards supporting N-2 read-only indices we need to add them back. This change reverts previous removal commits and adapts tests to also check we now throw errors for newly created indices. 
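For illustration, a mapping of the shape below exercises the restored logic; the metadata field (_routing) and the offending parameter (type) are arbitrary picks, and the same applies to fields, copy_to and boost on any configurable metadata field:

    public class MetadataParamExample {
        public static void main(String[] args) {
            // A metadata field definition carrying a parameter that is not a real option:
            String mapping = """
                { "_doc": { "_routing": { "type": "keyword" } } }
                """;
            System.out.println(mapping);
            // Index created before 8.6.0:             parameter is silently ignored.
            // Index created from 8.6.0, before 9.x:   accepted, but logs a deprecation warning.
            // Index created on the Lucene 10 line:    rejected with MapperParsingException.
        }
    }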
--- docs/changelog/{116944.yaml => 118825.yaml} | 4 +- .../index/mapper/MetadataFieldMapper.java | 22 +++++ .../index/KnownIndexVersions.java | 1 + .../index/mapper/MetadataMapperTestCase.java | 85 +++++++++++++++++++ 4 files changed, 110 insertions(+), 2 deletions(-) rename docs/changelog/{116944.yaml => 118825.yaml} (84%) diff --git a/docs/changelog/116944.yaml b/docs/changelog/118825.yaml similarity index 84% rename from docs/changelog/116944.yaml rename to docs/changelog/118825.yaml index e7833e49cf965..23170ec4705da 100644 --- a/docs/changelog/116944.yaml +++ b/docs/changelog/118825.yaml @@ -1,4 +1,4 @@ -pr: 116944 +pr: 118825 summary: "Remove support for type, fields, `copy_to` and boost in metadata field definition" area: Mapping type: breaking @@ -6,6 +6,6 @@ issues: [] breaking: title: "Remove support for type, fields, copy_to and boost in metadata field definition" area: Mapping - details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition + details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition starting with version 9. impact: Users providing type, fields, copy_to or boost as part of metadata field definition should remove them from their mappings. notable: false diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 31aa787c3f758..033742b3b57fc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -10,13 +10,17 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Iterator; import java.util.Map; +import java.util.Set; import java.util.function.Function; /** @@ -132,6 +136,8 @@ public final MetadataFieldMapper build(MapperBuilderContext context) { return build(); } + private static final Set UNSUPPORTED_PARAMETERS_8_6_0 = Set.of("type", "fields", "copy_to", "boost"); + public final void parseMetadataField(String name, MappingParserContext parserContext, Map fieldNode) { final Parameter[] params = getParameters(); Map> paramsMap = Maps.newHashMapWithExpectedSize(params.length); @@ -144,6 +150,22 @@ public final void parseMetadataField(String name, MappingParserContext parserCon final Object propNode = entry.getValue(); Parameter parameter = paramsMap.get(propName); if (parameter == null) { + IndexVersion indexVersionCreated = parserContext.indexVersionCreated(); + if (indexVersionCreated.before(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) + && UNSUPPORTED_PARAMETERS_8_6_0.contains(propName)) { + if (indexVersionCreated.onOrAfter(IndexVersions.V_8_6_0)) { + // silently ignore type, and a few other parameters: sadly we've been doing this for a long time + deprecationLogger.warn( + DeprecationCategory.API, + propName, + "Parameter [{}] has no effect on metadata field [{}] and will be removed in future", + propName, + name + ); + } + iterator.remove(); + continue; + } throw new MapperParsingException("unknown parameter [" + propName + "] on metadata field [" + name 
+ "]"); } parameter.parse(name, parserContext, propNode); diff --git a/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java b/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java index 5cdb3f1808a38..4f559a5f3eaef 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java +++ b/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java @@ -19,6 +19,7 @@ public class KnownIndexVersions { * A sorted list of all known index versions */ public static final List ALL_VERSIONS = List.copyOf(IndexVersions.getAllVersions()); + /** * A sorted list of all known index versions that can be written to */ diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java index 449ecc099412f..580eb6eacb27e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; @@ -142,4 +143,88 @@ public final void testFixedMetaFieldsAreNotConfigurable() throws IOException { ); assertEquals("Failed to parse mapping: " + fieldName() + " is not configurable", exception.getMessage()); } + + public void testTypeAndFriendsAreAcceptedBefore_8_6_0() throws IOException { + assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); + IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_6_0); + // we randomly also pick read-only versions to test that we can still parse the parameters for them + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersionUtils.getLowestReadCompatibleVersion(), + previousVersion + ); + assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); + MapperService mapperService = createMapperService(version, mapping(b -> {})); + // these parameters were previously silently ignored, they will still be ignored in existing indices + String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" }; + for (String param : unsupportedParameters) { + String mappingAsString = "{\n" + + " \"_doc\" : {\n" + + " \"" + + fieldName() + + "\" : {\n" + + " \"" + + param + + "\" : \"any\"\n" + + " }\n" + + " }\n" + + "}"; + assertNotNull(mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString))); + } + } + + public void testTypeAndFriendsAreDeprecatedFrom_8_6_0_TO_9_0_0() throws IOException { + assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); + IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0); + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_6_0, previousVersion); + assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); + MapperService mapperService = createMapperService(version, 
mapping(b -> {})); + // these parameters were deprecated, they now should throw an error in new indices + String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" }; + for (String param : unsupportedParameters) { + String mappingAsString = "{\n" + + " \"_doc\" : {\n" + + " \"" + + fieldName() + + "\" : {\n" + + " \"" + + param + + "\" : \"any\"\n" + + " }\n" + + " }\n" + + "}"; + assertNotNull(mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString))); + assertWarnings("Parameter [" + param + "] has no effect on metadata field [" + fieldName() + "] and will be removed in future"); + } + } + + public void testTypeAndFriendsThrow_After_9_0_0() throws IOException { + assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.UPGRADE_TO_LUCENE_10_0_0, + IndexVersion.current() + ); + assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); + MapperService mapperService = createMapperService(version, mapping(b -> {})); + // these parameters were previously silently ignored, they are now deprecated in new indices + String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" }; + for (String param : unsupportedParameters) { + String mappingAsString = "{\n" + + " \"_doc\" : {\n" + + " \"" + + fieldName() + + "\" : {\n" + + " \"" + + param + + "\" : \"any\"\n" + + " }\n" + + " }\n" + + "}"; + expectThrows( + MapperParsingException.class, + () -> mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString)) + ); + } + } } From b8130768f6c41e876014dff8838981c99c1078b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 18 Dec 2024 22:37:14 +0100 Subject: [PATCH 18/62] Add back version based logic from IndexSortConfig This change re-introduces pre-7.13 deprecation logging and silent handling of index sorting on alias fields. We need to still support this for v9 for read-only indices. 
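A hedged illustration of the preserved behavior (the index body and field names are invented for the example): an index sort declared on a field alias resolves through the alias on old indices and is rejected on new ones.

    public class AliasSortExample {
        public static void main(String[] args) {
            // "field" is an alias resolving to "aliased"; the index sort names the alias.
            String createIndexBody = """
                {
                  "settings": { "index.sort.field": "field" },
                  "mappings": {
                    "properties": {
                      "aliased": { "type": "keyword" },
                      "field": { "type": "alias", "path": "aliased" }
                    }
                  }
                }
                """;
            System.out.println(createIndexBody);
            // Index created before 7.13: the sort silently resolves to "aliased",
            //                            plus a deprecation warning.
            // Index created on 7.13+:    "Cannot use alias [field] as an index sort field".
        }
    }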
--- .../elasticsearch/index/IndexSortConfig.java | 25 ++++++++++++++++++- .../index/IndexSortSettingsTests.java | 14 +++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index 2811c7493a277..6c044ab999899 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -14,6 +14,8 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -53,6 +55,8 @@ **/ public final class IndexSortConfig { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndexSortConfig.class); + /** * The list of field names */ @@ -134,10 +138,14 @@ private static MultiValueMode parseMultiValueMode(String value) { // visible for tests final FieldSortSpec[] sortSpecs; + private final IndexVersion indexCreatedVersion; + private final String indexName; private final IndexMode indexMode; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); + this.indexCreatedVersion = indexSettings.getIndexVersionCreated(); + this.indexName = indexSettings.getIndex().getName(); this.indexMode = indexSettings.getMode(); if (this.indexMode == IndexMode.TIME_SERIES) { @@ -230,7 +238,22 @@ public Sort buildIndexSort( throw new IllegalArgumentException(err); } if (Objects.equals(ft.name(), sortSpec.field) == false) { - throw new IllegalArgumentException("Cannot use alias [" + sortSpec.field + "] as an index sort field"); + if (this.indexCreatedVersion.onOrAfter(IndexVersions.V_7_13_0)) { + throw new IllegalArgumentException("Cannot use alias [" + sortSpec.field + "] as an index sort field"); + } else { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + "index-sort-aliases", + "Index sort for index [" + + indexName + + "] defined on field [" + + sortSpec.field + + "] which resolves to field [" + + ft.name() + + "]. " + + "You will not be able to define an index sort over aliased fields in new indexes" + ); + } } boolean reverse = sortSpec.order == null ? 
false : (sortSpec.order == SortOrder.DESC); MultiValueMode mode = sortSpec.mode; diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 441ad8a5a225a..7221d69b74d46 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -160,6 +160,20 @@ public void testSortingAgainstAliases() { assertEquals("Cannot use alias [field] as an index sort field", e.getMessage()); } + public void testSortingAgainstAliasesPre713() { + IndexSettings indexSettings = indexSettings( + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_7_12_0).put("index.sort.field", "field").build() + ); + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Sort sort = buildIndexSort(indexSettings, Map.of("field", aliased)); + assertThat(sort.getSort(), arrayWithSize(1)); + assertThat(sort.getSort()[0].getField(), equalTo("aliased")); + assertWarnings( + "Index sort for index [test] defined on field [field] which resolves to field [aliased]. " + + "You will not be able to define an index sort over aliased fields in new indexes" + ); + } + public void testTimeSeriesMode() { IndexSettings indexSettings = indexSettings( Settings.builder() From e087f3d9371370c0cde96d83538938ca0d15276c Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 18 Dec 2024 15:43:53 -0600 Subject: [PATCH 19/62] Connecting the reindex data stream persistent task to ReindexDataStreamIndexAction (#118978) --- ...indexDataStreamPersistentTaskExecutor.java | 125 ++++++++++++-- .../upgrades/DataStreamsUpgradeIT.java | 156 ++++++++++++++++++ 2 files changed, 269 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index 494be303980a7..dc8e33bc091e6 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -9,8 +9,15 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; +import org.elasticsearch.action.support.CountDownActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -20,9 +27,13 @@ import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexAction; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; +import 
java.util.NoSuchElementException; import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate; @@ -72,22 +83,109 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask reindexClient.execute(GetDataStreamAction.INSTANCE, request, ActionListener.wrap(response -> { List dataStreamInfos = response.getDataStreams(); if (dataStreamInfos.size() == 1) { - List indices = dataStreamInfos.getFirst().getDataStream().getIndices(); - List indicesToBeReindexed = indices.stream() - .filter(getOldIndexVersionPredicate(clusterService.state().metadata())) - .toList(); - reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); - for (Index index : indicesToBeReindexed) { - reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); - // TODO This is just a placeholder. This is where the real data stream reindex logic will go - reindexDataStreamTask.reindexSucceeded(index.getName()); + DataStream dataStream = dataStreamInfos.getFirst().getDataStream(); + if (getOldIndexVersionPredicate(clusterService.state().metadata()).test(dataStream.getWriteIndex())) { + reindexClient.execute( + RolloverAction.INSTANCE, + new RolloverRequest(sourceDataStream, null), + ActionListener.wrap( + rolloverResponse -> reindexIndices(dataStream, reindexDataStreamTask, reindexClient, sourceDataStream), + e -> completeFailedPersistentTask(reindexDataStreamTask, e) + ) + ); + } else { + reindexIndices(dataStream, reindexDataStreamTask, reindexClient, sourceDataStream); } - - completeSuccessfulPersistentTask(reindexDataStreamTask); } else { completeFailedPersistentTask(reindexDataStreamTask, new ElasticsearchException("data stream does not exist")); } - }, reindexDataStreamTask::markAsFailed)); + }, exception -> completeFailedPersistentTask(reindexDataStreamTask, exception))); + } + + private void reindexIndices( + DataStream dataStream, + ReindexDataStreamTask reindexDataStreamTask, + ExecuteWithHeadersClient reindexClient, + String sourceDataStream + ) { + List indices = dataStream.getIndices(); + List indicesToBeReindexed = indices.stream().filter(getOldIndexVersionPredicate(clusterService.state().metadata())).toList(); + reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); + // The CountDownActionListener is 1 more than the number of indices so that the count is not 0 if we have no indices + CountDownActionListener listener = new CountDownActionListener(indicesToBeReindexed.size() + 1, ActionListener.wrap(response1 -> { + completeSuccessfulPersistentTask(reindexDataStreamTask); + }, exception -> { completeFailedPersistentTask(reindexDataStreamTask, exception); })); + List indicesRemaining = Collections.synchronizedList(new ArrayList<>(indicesToBeReindexed)); + final int maxConcurrentIndices = 1; + for (int i = 0; i < maxConcurrentIndices; i++) { + maybeProcessNextIndex(indicesRemaining, reindexDataStreamTask, reindexClient, sourceDataStream, listener); + } + // This takes care of the additional latch count referenced above: + listener.onResponse(null); + } + + private void maybeProcessNextIndex( + List indicesRemaining, + ReindexDataStreamTask reindexDataStreamTask, + ExecuteWithHeadersClient reindexClient, + String sourceDataStream, + CountDownActionListener listener + ) { + if (indicesRemaining.isEmpty()) { + return; + } + Index index; + try { + index = indicesRemaining.removeFirst(); + } catch (NoSuchElementException e) { + return; + } + 
reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); + reindexClient.execute( + ReindexDataStreamIndexAction.INSTANCE, + new ReindexDataStreamIndexAction.Request(index.getName()), + ActionListener.wrap(response1 -> { + updateDataStream(sourceDataStream, index.getName(), response1.getDestIndex(), ActionListener.wrap(unused -> { + reindexDataStreamTask.reindexSucceeded(index.getName()); + listener.onResponse(null); + maybeProcessNextIndex(indicesRemaining, reindexDataStreamTask, reindexClient, sourceDataStream, listener); + }, exception -> { + reindexDataStreamTask.reindexFailed(index.getName(), exception); + listener.onResponse(null); + }), reindexClient); + }, exception -> { + reindexDataStreamTask.reindexFailed(index.getName(), exception); + listener.onResponse(null); + }) + ); + } + + private void updateDataStream( + String dataStream, + String oldIndex, + String newIndex, + ActionListener listener, + ExecuteWithHeadersClient reindexClient + ) { + reindexClient.execute( + ModifyDataStreamsAction.INSTANCE, + new ModifyDataStreamsAction.Request( + TimeValue.MAX_VALUE, + TimeValue.MAX_VALUE, + List.of(DataStreamAction.removeBackingIndex(dataStream, oldIndex), DataStreamAction.addBackingIndex(dataStream, newIndex)) + ), + new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + listener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + ); } private void completeSuccessfulPersistentTask(ReindexDataStreamTask persistentTask) { @@ -105,6 +203,9 @@ private TimeValue getTimeToLive(ReindexDataStreamTask reindexDataStreamTask) { PersistentTasksCustomMetadata.PersistentTask persistentTask = persistentTasksCustomMetadata.getTask( reindexDataStreamTask.getPersistentTaskId() ); + if (persistentTask == null) { + return TimeValue.timeValueMillis(0); + } PersistentTaskState state = persistentTask.getState(); final long completionTime; if (state == null) { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java index 40ad5bba29baa..58556dd420ca6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java @@ -11,15 +11,23 @@ import org.elasticsearch.client.Response; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.Strings; +import org.elasticsearch.xcontent.json.JsonXContent; import org.hamcrest.Matchers; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.time.Instant; import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.upgrades.IndexingIT.assertCount; +import static org.hamcrest.Matchers.equalTo; public class DataStreamsUpgradeIT extends AbstractUpgradeTestCase { @@ -164,4 +172,152 @@ public void testDataStreamValidationDoesNotBreakUpgrade() throws Exception { } } + public void testUpgradeDataStream() throws Exception { + String dataStreamName = "reindex_test_data_stream"; + int numRollovers = 5; + if (CLUSTER_TYPE 
== ClusterType.OLD) { + createAndRolloverDataStream(dataStreamName, numRollovers); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + upgradeDataStream(dataStreamName, numRollovers); + } + } + + private static void createAndRolloverDataStream(String dataStreamName, int numRollovers) throws IOException { + // We want to create a data stream and roll it over several times so that we have several indices to upgrade + final String template = """ + { + "settings":{ + "index": { + "mode": "time_series" + } + }, + "mappings":{ + "dynamic_templates": [ + { + "labels": { + "path_match": "pod.labels.*", + "mapping": { + "type": "keyword", + "time_series_dimension": true + } + } + } + ], + "properties": { + "@timestamp" : { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "name": { + "type": "keyword" + }, + "network": { + "properties": { + "tx": { + "type": "long" + }, + "rx": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + """; + final String indexTemplate = """ + { + "index_patterns": ["$PATTERN"], + "template": $TEMPLATE, + "data_stream": { + } + }"""; + var putIndexTemplateRequest = new Request("POST", "/_index_template/reindex_test_data_stream_template"); + putIndexTemplateRequest.setJsonEntity(indexTemplate.replace("$TEMPLATE", template).replace("$PATTERN", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + bulkLoadData(dataStreamName); + for (int i = 0; i < numRollovers; i++) { + rollover(dataStreamName); + bulkLoadData(dataStreamName); + } + } + + private void upgradeDataStream(String dataStreamName, int numRollovers) throws Exception { + Request reindexRequest = new Request("POST", "/_migration/reindex"); + reindexRequest.setJsonEntity(Strings.format(""" + { + "mode": "upgrade", + "source": { + "index": "%s" + } + }""", dataStreamName)); + Response reindexResponse = client().performRequest(reindexRequest); + assertOK(reindexResponse); + assertBusy(() -> { + Request statusRequest = new Request("GET", "_migration/reindex/" + dataStreamName + "/_status"); + Response statusResponse = client().performRequest(statusRequest); + Map statusResponseMap = XContentHelper.convertToMap( + JsonXContent.jsonXContent, + statusResponse.getEntity().getContent(), + false + ); + assertOK(statusResponse); + assertThat(statusResponseMap.get("complete"), equalTo(true)); + if (isOriginalClusterCurrent()) { + // If the original cluster was the same as this one, we don't want any indices reindexed: + assertThat(statusResponseMap.get("successes"), equalTo(0)); + } else { + assertThat(statusResponseMap.get("successes"), equalTo(numRollovers + 1)); + } + }, 60, TimeUnit.SECONDS); + Request cancelRequest = new Request("POST", "_migration/reindex/" + dataStreamName + "/_cancel"); + Response cancelResponse = client().performRequest(cancelRequest); + assertOK(cancelResponse); + } + + private static void bulkLoadData(String dataStreamName) throws IOException { + final String bulk = """ + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cat", "network": {"tx": 2001818691, "rx": 802133794}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "network": {"tx": 2005177954, "rx": 801479970}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cow", "network": {"tx": 2006223737, "rx": 802337279}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", 
"k8s": {"pod": {"name": "rat", "network": {"tx": 2012916202, "rx": 803685721}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "dog", "network": {"tx": 1434521831, "rx": 530575198}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "tiger", "network": {"tx": 1434577921, "rx": 530600088}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "lion", "network": {"tx": 1434587694, "rx": 530604797}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", "network": {"tx": 1434595272, "rx": 530605511}}}} + """; + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + bulkRequest.setJsonEntity(bulk.replace("$now", formatInstant(Instant.now()))); + var response = client().performRequest(bulkRequest); + assertOK(response); + } + + static String formatInstant(Instant instant) { + return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); + } + + private static void rollover(String dataStreamName) throws IOException { + Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + Response rolloverResponse = client().performRequest(rolloverRequest); + assertOK(rolloverResponse); + } } From c54a26db49da892170f13434cfe5dd9f3bfe78bb Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 19 Dec 2024 09:20:22 +1100 Subject: [PATCH 20/62] Mute org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} #116777 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2d215bfb04c57..35a9b31685794 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -296,6 +296,9 @@ tests: - class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests method: testInvalidToken issue: https://github.com/elastic/elasticsearch/issues/119019 +- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT + method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} + issue: https://github.com/elastic/elasticsearch/issues/116777 # Examples: # From c3a59bb9659accaf36f83f0b28d49f68939f6ce7 Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 18 Dec 2024 18:06:52 -0500 Subject: [PATCH 21/62] Process execution checks and IT tests (#119010) * Process creation checks and IT tests * Remove process queries; only forbid execution --- .../bridge/EntitlementChecker.java | 7 ++++ .../common/RestEntitlementsCheckAction.java | 39 +++++++++++++------ .../EntitlementAllowedNonModularPlugin.java | 1 - .../qa/EntitlementAllowedPlugin.java | 1 - .../EntitlementDeniedNonModularPlugin.java | 1 - .../qa/EntitlementDeniedPlugin.java | 1 - .../api/ElasticsearchEntitlementChecker.java | 11 ++++++ .../runtime/policy/PolicyManager.java | 20 ++++++++++ 8 files changed, 66 insertions(+), 15 deletions(-) diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index a6b8a31fc3894..25f4e97bd12ee 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ 
b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -11,6 +11,7 @@ import java.net.URL; import java.net.URLStreamHandlerFactory; +import java.util.List; public interface EntitlementChecker { @@ -29,4 +30,10 @@ public interface EntitlementChecker { void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent); void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory); + + // Process creation + void check$$start(Class callerClass, ProcessBuilder that, ProcessBuilder.Redirect[] redirects); + + void check$java_lang_ProcessBuilder$startPipeline(Class callerClass, List builders); + } diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 1ac4a7506eacb..3cc4b97e9bfea 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -29,43 +29,47 @@ import java.util.stream.Collectors; import static java.util.Map.entry; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.deniedToPlugins; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins; import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestEntitlementsCheckAction extends BaseRestHandler { private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class); private final String prefix; - private record CheckAction(Runnable action, boolean isServerOnly) { - - static CheckAction serverOnly(Runnable action) { + record CheckAction(Runnable action, boolean isAlwaysDeniedToPlugins) { + /** + * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case. + * Used both for always-denied entitlements as well as those granted only to the server itself. 
+ */ + static CheckAction deniedToPlugins(Runnable action) { return new CheckAction(action, true); } - static CheckAction serverAndPlugin(Runnable action) { + static CheckAction forPlugins(Runnable action) { return new CheckAction(action, false); } } private static final Map checkActions = Map.ofEntries( - entry("runtime_exit", CheckAction.serverOnly(RestEntitlementsCheckAction::runtimeExit)), - entry("runtime_halt", CheckAction.serverOnly(RestEntitlementsCheckAction::runtimeHalt)), - entry("create_classloader", CheckAction.serverAndPlugin(RestEntitlementsCheckAction::createClassLoader)) + entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), + entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), + entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), + // entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), + entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)) ); @SuppressForbidden(reason = "Specifically testing Runtime.exit") private static void runtimeExit() { - logger.info("Calling Runtime.exit;"); Runtime.getRuntime().exit(123); } @SuppressForbidden(reason = "Specifically testing Runtime.halt") private static void runtimeHalt() { - logger.info("Calling Runtime.halt;"); Runtime.getRuntime().halt(123); } private static void createClassLoader() { - logger.info("Calling new URLClassLoader"); try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) { logger.info("Created URLClassLoader [{}]", classLoader.getName()); } catch (IOException e) { @@ -73,6 +77,18 @@ private static void createClassLoader() { } } + private static void processBuilder_start() { + // TODO: processBuilder().start(); + } + + private static void processBuilder_startPipeline() { + try { + ProcessBuilder.startPipeline(List.of()); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public RestEntitlementsCheckAction(String prefix) { this.prefix = prefix; } @@ -80,7 +96,7 @@ public RestEntitlementsCheckAction(String prefix) { public static Set getServerAndPluginsCheckActions() { return checkActions.entrySet() .stream() - .filter(kv -> kv.getValue().isServerOnly() == false) + .filter(kv -> kv.getValue().isAlwaysDeniedToPlugins() == false) .map(Map.Entry::getKey) .collect(Collectors.toSet()); } @@ -112,6 +128,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } return channel -> { + logger.info("Calling check action [{}]", actionName); checkAction.action().run(); channel.sendResponse(new RestResponse(RestStatus.OK, Strings.format("Succesfully executed action [%s]", actionName))); }; diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java index d65981c30f0be..82146e6a87759 100644 --- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class 
EntitlementAllowedNonModularPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java index d81e23e311be1..8649daf272e70 100644 --- a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java +++ b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementAllowedPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java index 0f908d84260fb..7ca89c735a602 100644 --- a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java +++ b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementDeniedNonModularPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java index 0ed27e2e576e7..2a2fd35d47cf3 100644 --- a/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java +++ b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementDeniedPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index a5ca0543ad15a..75365fbb74d65 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -14,6 +14,7 @@ import java.net.URL; import java.net.URLStreamHandlerFactory; +import java.util.List; /** * Implementation of the {@link EntitlementChecker} interface, providing additional @@ -67,4 +68,14 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { ) { policyManager.checkCreateClassLoader(callerClass); } + + @Override + public void check$$start(Class callerClass, ProcessBuilder processBuilder, ProcessBuilder.Redirect[] redirects) { + policyManager.checkStartProcess(callerClass); + } + + @Override + public void 
check$java_lang_ProcessBuilder$startPipeline(Class callerClass, List builders) { + policyManager.checkStartProcess(callerClass); + } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 74ba986041dac..e06f7768eb8be 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -105,6 +105,26 @@ private static Map> buildScopeEntitlementsMap(Policy p return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.name, scope -> scope.entitlements)); } + public void checkStartProcess(Class callerClass) { + neverEntitled(callerClass, "start process"); + } + + private void neverEntitled(Class callerClass, String operationDescription) { + var requestingModule = requestingModule(callerClass); + if (isTriviallyAllowed(requestingModule)) { + return; + } + + throw new NotEntitledException( + Strings.format( + "Not entitled: caller [%s], module [%s], operation [%s]", + callerClass, + requestingModule.getName(), + operationDescription + ) + ); + } + public void checkExitVM(Class callerClass) { checkEntitlementPresent(callerClass, ExitVMEntitlement.class); } From c98ca63b460ac6f546ee1c151d7bda3dbd641701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Dec 2024 00:09:37 +0100 Subject: [PATCH 22/62] Revert 7.x related code from analysis common (#118972) This reverts #113009 and re-introduces v7 compatibility logic and previous v7 tests since we now support v7 indices as read-only on v9. --- .../analysis/common/CommonAnalysisPlugin.java | 131 +++++++- .../common/CommonAnalysisPluginTests.java | 292 ++++++++++++++++++ .../common/EdgeNGramTokenizerTests.java | 3 +- 3 files changed, 419 insertions(+), 7 deletions(-) create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index a97154fd4d1ff..c980aaba71444 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -101,7 +101,12 @@ import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; @@ -134,6 +139,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisPlugin.class); + private final SetOnce scriptServiceHolder = new SetOnce<>(); private final SetOnce synonymsManagementServiceHolder 
= new SetOnce<>(); @@ -224,6 +231,28 @@ public Map> getTokenFilters() { filters.put("dictionary_decompounder", requiresAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new)); filters.put("dutch_stem", DutchStemTokenFilterFactory::new); filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); + filters.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + return new EdgeNGramTokenFilterFactory(indexSettings, environment, name, settings) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); + } else { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "edgeNGram_deprecation", + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead." + ); + } + return super.create(tokenStream); + } + + }; + }); filters.put("elision", requiresAnalysisSettings(ElisionTokenFilterFactory::new)); filters.put("fingerprint", FingerprintTokenFilterFactory::new); filters.put("flatten_graph", FlattenGraphTokenFilterFactory::new); @@ -243,6 +272,28 @@ public Map> getTokenFilters() { filters.put("min_hash", MinHashTokenFilterFactory::new); filters.put("multiplexer", MultiplexerTokenFilterFactory::new); filters.put("ngram", NGramTokenFilterFactory::new); + filters.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + return new NGramTokenFilterFactory(indexSettings, environment, name, settings) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." + ); + } else { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "nGram_deprecation", + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead." + ); + } + return super.create(tokenStream); + } + + }; + }); filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); @@ -294,7 +345,39 @@ public Map> getTokenizers() { tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new); tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); tokenizers.put("thai", ThaiTokenizerFactory::new); + tokenizers.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated in 7.6. " + + "Please use the tokenizer name to [ngram] for indices created in versions 8 or higher instead." 
+ ); + } else if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "nGram_tokenizer_deprecation", + "The [nGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [ngram] instead." + ); + } + return new NGramTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("ngram", NGramTokenizerFactory::new); + tokenizers.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated in 7.6. " + + "Please use the tokenizer name to [edge_nGram] for indices created in versions 8 or higher instead." + ); + } else if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "edgeNGram_tokenizer_deprecation", + "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [edge_ngram] instead." + ); + } + return new EdgeNGramTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); @@ -505,17 +588,53 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add( - PreConfiguredTokenizer.indexVersion( - "edge_ngram", - (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) - ) - ); + tokenizers.add(PreConfiguredTokenizer.indexVersion("edge_ngram", (version) -> { + if (version.onOrAfter(IndexVersions.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API // This is already broken with normalization, so backwards compat isn't necessary? tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new)); + + tokenizers.add(PreConfiguredTokenizer.indexVersion("nGram", (version) -> { + if (version.onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated in 7.6. " + + "Please use the tokenizer name to [ngram] for indices created in versions 8 or higher instead." + ); + } else if (version.onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "nGram_tokenizer_deprecation", + "The [nGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [ngram] instead." 
+ ); + } + return new NGramTokenizer(); + })); + tokenizers.add(PreConfiguredTokenizer.indexVersion("edgeNGram", (version) -> { + if (version.onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated in 7.6. " + + "Please use the tokenizer name to [edge_ngram] for indices created in versions 8 or higher instead." + ); + } else if (version.onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "edgeNGram_tokenizer_deprecation", + "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [edge_ngram] instead." + ); + } + if (version.onOrAfter(IndexVersions.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); return tokenizers; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java new file mode 100644 index 0000000000000..9972d58b2dcc1 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -0,0 +1,292 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.index.IndexVersionUtils; + +import java.io.IOException; +import java.util.Map; + +public class CommonAnalysisPluginTests extends ESTestCase { + + /** + * Check that the deprecated "nGram" filter throws exception for indices created since 7.0.0 and + * logs a warning for earlier indices when the filter is used as a custom filter + */ + public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "nGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin) + ); + assertEquals( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead.", + ex.getMessage() + ); + } + + final Settings settingsPre7 = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "nGram") + .build(); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); + assertWarnings( + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead." 
+ ); + } + } + + /** + * Check that the deprecated "edgeNGram" filter throws exception for indices created since 7.0.0 and + * logs a warning for earlier indices when the filter is used as a custom filter + */ + public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "edgeNGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin) + ); + assertEquals( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead.", + ex.getMessage() + ); + } + + final Settings settingsPre7 = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "edgeNGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); + assertWarnings( + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead." 
+ ); + } + } + + /** + * Check that we log a deprecation warning for "nGram" and "edgeNGram" tokenizer names with 7.6 and + * disallow usages for indices created after 8.0 + */ + public void testNGramTokenizerDeprecation() throws IOException { + // tests for prebuilt tokenizer + doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + + // same batch of tests for custom tokenizer definition in the settings + doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + } + + public void doTestPrebuiltTokenizerDeprecation(String deprecatedName, String replacement, IndexVersion version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenizers = createTestAnalysis( + IndexSettingsModule.newIndexSettings("index", 
settings), + settings, + commonAnalysisPlugin + ).tokenizer; + TokenizerFactory tokenizerFactory = tokenizers.get(deprecatedName); + + Tokenizer tokenizer = tokenizerFactory.create(); + assertNotNull(tokenizer); + if (expectWarning) { + assertWarnings( + "The [" + + deprecatedName + + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + + replacement + + "] instead." + ); + } + } + } + + public void doTestCustomTokenizerDeprecation(String deprecatedName, String replacement, IndexVersion version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "my_tokenizer") + .put("index.analysis.tokenizer.my_tokenizer.type", deprecatedName) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin); + + if (expectWarning) { + assertWarnings( + "The [" + + deprecatedName + + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + + replacement + + "] instead." + ); + } + } + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java index 11d1653439e59..c998e927e25a8 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -34,7 +34,7 @@ public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase { - private static IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) throws IOException { + private IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version) @@ -54,6 +54,7 @@ public void testPreConfiguredTokenizer() throws IOException { assertNotNull(analyzer); assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); } + } public void testCustomTokenChars() throws IOException { From 547c7800e478ae69900c42d955262da5f4350a60 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 18 Dec 2024 15:10:29 -0800 Subject: [PATCH 23/62] Improve error message when whitelist resource file is not found (#119012) This commit replaces a NullPointerException that occurs if a whitelist resource is not found with a customized message. Additionally it augments the message with specific actions, especially in the case the owning class is modularized, which requires additional work.
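In outline, the fix wraps the resource lookup so that a missing file fails fast with an actionable message instead of surfacing later as a NullPointerException. A minimal sketch of the pattern, using the same names as the WhitelistLoader hunks below:

    private static InputStream getResourceAsStream(Class<?> owner, String name) {
        InputStream stream = owner.getResourceAsStream(name);
        if (stream == null) {
            // Fail fast with context instead of returning null to the caller.
            String msg = "Whitelist file [" + owner.getPackageName().replace(".", "/") + "/" + name
                + "] not found from owning class [" + owner.getName() + "].";
            if (owner.getModule().isNamed()) {
                // Modularized owners additionally need the package opened to the loading module.
                msg += " Check that the file exists and the package [" + owner.getPackageName()
                    + "] is opened to module " + WhitelistLoader.class.getModule().getName();
            }
            throw new ResourceNotFoundException(msg);
        }
        return stream;
    }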
--- .../painless/spi/WhitelistLoader.java | 32 ++++++++++- .../painless/WhitelistLoaderTests.java | 57 +++++++++++++++++++ .../bootstrap/test-framework.policy | 2 + .../org/elasticsearch/test/jar/JarUtils.java | 33 +++++++++++ 4 files changed, 121 insertions(+), 3 deletions(-) diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 2e7f0de027de7..37bff97a07ae2 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -9,8 +9,10 @@ package org.elasticsearch.painless.spi; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.lang.reflect.Constructor; @@ -140,7 +142,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep * } * } */ - public static Whitelist loadFromResourceFiles(Class resource, Map parsers, String... filepaths) { + public static Whitelist loadFromResourceFiles(Class owner, Map parsers, String... filepaths) { List whitelistClasses = new ArrayList<>(); List whitelistStatics = new ArrayList<>(); List whitelistClassBindings = new ArrayList<>(); @@ -153,7 +155,7 @@ public static Whitelist loadFromResourceFiles(Class resource, Map resource, Map) resource::getClassLoader); + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction) owner::getClassLoader); return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings, Collections.emptyList()); } + private static InputStream getResourceAsStream(Class owner, String name) { + InputStream stream = owner.getResourceAsStream(name); + if (stream == null) { + String msg = "Whitelist file [" + + owner.getPackageName().replace(".", "/") + + "/" + + name + + "] not found from owning class [" + + owner.getName() + + "]."; + if (owner.getModule().isNamed()) { + msg += " Check that the file exists and the package [" + + owner.getPackageName() + + "] is opened " + + "to module " + + WhitelistLoader.class.getModule().getName(); + } + throw new ResourceNotFoundException(msg); + } + return stream; + } + private static List parseWhitelistAnnotations(Map parsers, String line) { List annotations; diff --git a/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java index e62d0b438b098..b46bc118e0913 100644 --- a/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java +++ b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistLoader; @@ -17,10 +18,18 @@ import org.elasticsearch.painless.spi.annotation.NoImportAnnotation; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; +import 
java.lang.ModuleLayer.Controller; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; import java.util.HashMap; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + public class WhitelistLoaderTests extends ESTestCase { public void testUnknownAnnotations() { @@ -96,4 +105,52 @@ public void testAnnotations() { assertEquals(3, count); } + + public void testMissingWhitelistResource() { + var e = expectThrows(ResourceNotFoundException.class, () -> WhitelistLoader.loadFromResourceFiles(Whitelist.class, "missing.txt")); + assertThat( + e.getMessage(), + equalTo( + "Whitelist file [org/elasticsearch/painless/spi/missing.txt] not found" + + " from owning class [org.elasticsearch.painless.spi.Whitelist]." + ) + ); + } + + public void testMissingWhitelistResourceInModule() throws Exception { + Map sources = new HashMap<>(); + sources.put("module-info", "module m {}"); + sources.put("p.TestOwner", "package p; public class TestOwner { }"); + var classToBytes = InMemoryJavaCompiler.compile(sources); + + Path dir = createTempDir(getTestName()); + Path jar = dir.resolve("m.jar"); + Map jarEntries = new HashMap<>(); + jarEntries.put("module-info.class", classToBytes.get("module-info")); + jarEntries.put("p/TestOwner.class", classToBytes.get("p.TestOwner")); + jarEntries.put("p/resource.txt", "# test resource".getBytes(StandardCharsets.UTF_8)); + JarUtils.createJarWithEntries(jar, jarEntries); + + try (var loader = JarUtils.loadJar(jar)) { + Controller controller = JarUtils.loadModule(jar, loader.classloader(), "m"); + Module module = controller.layer().findModule("m").orElseThrow(); + + Class ownerClass = module.getClassLoader().loadClass("p.TestOwner"); + + // first check we get a nice error message when accessing the resource + var e = expectThrows(ResourceNotFoundException.class, () -> WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt")); + assertThat( + e.getMessage(), + equalTo( + "Whitelist file [p/resource.txt] not found from owning class [p.TestOwner]." 
+ + " Check that the file exists and the package [p] is opened to module null" + ) + ); + + // now check we can actually read it once the package is opened to us + controller.addOpens(module, "p", WhitelistLoader.class.getModule()); + var whitelist = WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt"); + assertThat(whitelist, notNullValue()); + } + } } diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 040a7a6205f9c..462fab651c211 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -88,6 +88,7 @@ grant codeBase "${codebase.elasticsearch}" { // this is the test-framework, but the jar is horribly named grant codeBase "${codebase.framework}" { permission java.lang.RuntimePermission "setSecurityManager"; + permission java.lang.RuntimePermission "createClassLoader"; }; grant codeBase "${codebase.elasticsearch-rest-client}" { @@ -129,4 +130,5 @@ grant { permission java.nio.file.LinkPermission "symbolic"; // needed for keystore tests permission java.lang.RuntimePermission "accessUserInformation"; + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java b/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java index e5bdd66e949f7..0da392cb7fb01 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java @@ -9,13 +9,24 @@ package org.elasticsearch.test.jar; +import org.elasticsearch.test.PrivilegedOperations.ClosableURLClassLoader; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.OutputStream; +import java.lang.module.Configuration; +import java.lang.module.ModuleFinder; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.List; import java.util.Map; +import java.util.Set; import java.util.jar.JarEntry; import java.util.jar.JarOutputStream; import java.util.jar.Manifest; @@ -85,6 +96,28 @@ public static void createJarWithEntriesUTF(Path jarfile, Map ent createJarWithEntries(jarfile, map); } + /** + * Creates a class loader for the given jar file. + * @param path Path to the jar file to load + * @return A URLClassLoader that will load classes from the jar. It should be closed when no longer needed. 
+ */ + public static ClosableURLClassLoader loadJar(Path path) { + try { + URL[] urls = new URL[] { path.toUri().toURL() }; + return new ClosableURLClassLoader(URLClassLoader.newInstance(urls, JarUtils.class.getClassLoader())); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + public static ModuleLayer.Controller loadModule(Path path, ClassLoader loader, String name) { + var finder = ModuleFinder.of(path.getParent()); + var cf = Configuration.resolveAndBind(finder, List.of(ModuleLayer.boot().configuration()), ModuleFinder.of(), Set.of(name)); + return AccessController.doPrivileged( + (PrivilegedAction) () -> ModuleLayer.defineModulesWithOneLoader(cf, List.of(ModuleLayer.boot()), loader) + ); + } + @FunctionalInterface interface UncheckedIOFunction { R apply(T t) throws IOException; From 93aee0f1c6ccec241a6dce229b96e40ff36c9358 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:43:15 +1100 Subject: [PATCH 24/62] Mute org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT #115727 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 35a9b31685794..12f1fc510a332 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -299,6 +299,8 @@ tests: - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} issue: https://github.com/elastic/elasticsearch/issues/116777 +- class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT + issue: https://github.com/elastic/elasticsearch/issues/115727 # Examples: # From d103036db1533b60ba217b4ab20d369e6a3be8d6 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 09:53:18 +0100 Subject: [PATCH 25/62] Broaden index versions tested in some mappings tests (#119026) MINIMUM_READONLY_COMPATIBLE is used as a lower bound (N-2) as opposed to MINIMUM_COMPATIBLE (N-1). 
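Concretely, the mechanical change in each affected test is the lower bound handed to the version randomization helper; a representative before/after, mirroring the hunks below:

    // Before: samples only N-1 (still writable) index versions.
    IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_5_0);

    // After: widens the range down to N-2 (read-only) index versions.
    IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_8_5_0);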
--- .../org/elasticsearch/index/mapper/MappingParserTests.java | 6 +++++- .../java/org/elasticsearch/indices/IndicesModuleTests.java | 7 ++----- .../xpack/spatial/index/mapper/ShapeFieldMapperTests.java | 7 +++++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index e0f58b8922be2..b87ab09c530d6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -322,7 +322,11 @@ public void testBlankFieldName() throws Exception { } public void testBlankFieldNameBefore8_6_0() throws Exception { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_5_0); + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.V_8_5_0 + ); TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween( random(), TransportVersions.MINIMUM_COMPATIBLE, diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index ab65d56557ad9..0e333491588a6 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.indices; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; @@ -98,8 +97,6 @@ public Map getMetadataMappers() { DataStreamTimestampFieldMapper.NAME, FieldNamesFieldMapper.NAME }; - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) - @AwaitsFix(bugUrl = "test is referencing 7.x index versions so needs to be updated for 9.0 bump") public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { @@ -239,14 +236,14 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomVersion(); List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomVersion(); List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java index d030a2bbf81ad..5d2624735bebe 100644 --- 
a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java @@ -113,8 +113,11 @@ public void testDefaultConfiguration() throws IOException { } public void testDefaultDocValueConfigurationOnPre8_4() throws IOException { - // TODO verify which version this test is actually valid for (when PR is actually merged) - IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_3_0); + IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.V_8_3_0 + ); DocumentMapper defaultMapper = createDocumentMapper(oldVersion, fieldMapping(this::minimalMapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper(FIELD_NAME); assertThat(fieldMapper, instanceOf(fieldMapperClass())); From f760b40815ab30c1b07f01a72b7a81740a780bf1 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 09:54:41 +0100 Subject: [PATCH 26/62] Broaden index versions tested to cover v7 versions for some analysis tests (#119024) This replaces usages of MINIMUM_COMPATIBLE with MINIMUM_READONLY_COMPATIBLE as a lower bound when randomizing the index version in some tests. This provides more coverage as it relies on readonly versions as opposed to only those that can be written to. --- .../analysis/common/SynonymsAnalysisTests.java | 12 ++++++------ .../analysis/common/UniqueTokenFilterTests.java | 6 +----- .../phonetic/AnalysisPhoneticFactoryTests.java | 2 +- .../index/analysis/PreBuiltAnalyzerTests.java | 4 ++++ .../index/similarity/SimilarityServiceTests.java | 6 +++--- .../script/VectorScoreScriptUtilsTests.java | 8 ++++---- .../script/field/vectors/DenseVectorTests.java | 6 +++++- 7 files changed, 24 insertions(+), 20 deletions(-) diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 4fc6ca96b5f08..af57b8270ff02 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -118,7 +118,7 @@ public void testSynonymWordDeleteByAnalyzer() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -177,7 +177,7 @@ public void testSynonymWordDeleteByAnalyzerFromFile() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -231,7 +231,7 @@ public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = 
IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -338,7 +338,7 @@ public void testShingleFilters() { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") @@ -392,7 +392,7 @@ public void testPreconfiguredTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); @@ -424,7 +424,7 @@ public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .putList("common_words", "a", "b") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java index 6bec8dc1ebc62..d30e9d3c68cc9 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java @@ -124,11 +124,7 @@ public void testOldVersionGetXUniqueTokenFilter() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.getPreviousVersion(IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) - ) + IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) ) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java index 483c8ccef1202..e51d1f24a88ad 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java @@ -44,7 +44,7 @@ public void testDisallowedWithSynonyms() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), 
IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index f5b86f422915e..7f3399cb24a15 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -55,6 +55,10 @@ public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersion.current()), is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersions.MINIMUM_COMPATIBLE)) ); + assertThat( + PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersion.current()), + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersions.MINIMUM_READONLY_COMPATIBLE)) + ); } public void testThatInstancesAreCachedAndReused() { diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index f6d7b3d1d65f3..ecb942492af53 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -74,7 +74,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, negativeScoresSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_READONLY_COMPATIBLE, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -99,7 +99,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_READONLY_COMPATIBLE, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -124,7 +124,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_READONLY_COMPATIBLE, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java index dcaa64ede9e89..48d09c75cb2d1 100644 --- a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java @@ -51,12 +51,12 @@ public void testFloatVectorClassBindings() throws IOException { BinaryDenseVectorScriptDocValuesTests.wrap( new float[][] { docVector }, ElementType.FLOAT, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), "test", ElementType.FLOAT, dims, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), new BinaryDenseVectorDocValuesField( 
BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), @@ -303,12 +303,12 @@ public void testByteVsFloatSimilarity() throws IOException { BinaryDenseVectorScriptDocValuesTests.wrap( new float[][] { docVector }, ElementType.FLOAT, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), "field0", ElementType.FLOAT, dims, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), new BinaryDenseVectorDocValuesField( BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), diff --git a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java index 8a5298777ede0..63d502a248aa8 100644 --- a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java +++ b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java @@ -69,7 +69,11 @@ public void testFloatVsListQueryVector() { assertEquals(knn.cosineSimilarity(arrayQV), knn.cosineSimilarity(listQV), 0.001f); assertEquals(knn.cosineSimilarity((Object) listQV), knn.cosineSimilarity((Object) arrayQV), 0.001f); - for (IndexVersion indexVersion : List.of(IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current())) { + for (IndexVersion indexVersion : List.of( + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersion.current() + )) { BytesRef value = BinaryDenseVectorScriptDocValuesTests.mockEncodeDenseVector(docVector, ElementType.FLOAT, indexVersion); BinaryDenseVector bdv = new BinaryDenseVector(docVector, value, dims, indexVersion); From 90f038d80237279e89cdb559da7e82b1896af677 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 19 Dec 2024 09:57:02 +0100 Subject: [PATCH 27/62] group dataset files (#118739) *.csv files used for creating data and *.csv-spec files used to define test scenarios were mixed together in the resource directory. This change moves all *.csv files to data/*.csv so that it is easier to distinguish between data and specs. It also makes it quicker to get an overview of the existing data when starting a new spec.
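The loader-side effect of the move is small: only the resource prefix changes, while the dataset file names stay the same. A minimal sketch of the lookup after this change, mirroring the CsvTestsDataLoader hunk further below (the enclosing class and the `dataset` accessor are assumed from that file):

```java
// Data files now live under the data/ subdirectory of the test resources,
// while *.csv-spec files stay at the resource root.
final String dataName = "/data/" + dataset.dataFileName;
URL data = CsvTestsDataLoader.class.getResource(dataName);
if (data == null) {
    // Fail fast if a dataset was added at the old location by mistake.
    throw new IllegalArgumentException("Cannot find resource " + dataName);
}
```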
--- .../java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java | 2 +- .../qa/testFixtures/src/main/resources/{ => data}/addresses.csv | 0 .../esql/qa/testFixtures/src/main/resources/{ => data}/ages.csv | 0 .../src/main/resources/{ => data}/airport_city_boundaries.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/airports.csv | 0 .../testFixtures/src/main/resources/{ => data}/airports_mp.csv | 0 .../testFixtures/src/main/resources/{ => data}/airports_web.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/alerts.csv | 0 .../esql/qa/testFixtures/src/main/resources/{ => data}/apps.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/books.csv | 0 .../src/main/resources/{ => data}/cartesian_multipolygons.csv | 0 .../testFixtures/src/main/resources/{ => data}/client_cidr.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/clientips.csv | 0 .../src/main/resources/{ => data}/countries_bbox.csv | 0 .../src/main/resources/{ => data}/countries_bbox_web.csv | 0 .../testFixtures/src/main/resources/{ => data}/date_nanos.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/decades.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/distances.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/employees.csv | 0 .../src/main/resources/{ => data}/employees_incompatible.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/heights.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/hosts.csv | 0 .../esql/qa/testFixtures/src/main/resources/{ => data}/k8s.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/languages.csv | 0 .../src/main/resources/{ => data}/languages_non_unique_key.csv | 0 .../src/main/resources/{ => data}/message_types.csv | 0 .../src/main/resources/{ => data}/missing_ip_sample_data.csv | 0 .../src/main/resources/{ => data}/multivalue_geometries.csv | 0 .../src/main/resources/{ => data}/multivalue_points.csv | 0 .../src/main/resources/{ => data}/mv_sample_data.csv | 0 .../testFixtures/src/main/resources/{ => data}/sample_data.csv | 0 .../src/main/resources/{ => data}/sample_data_ts_long.csv | 0 .../src/main/resources/{ => data}/sample_data_ts_nanos.csv | 0 .../src/main/resources/{ => data}/semantic_text.csv | 0 .../qa/testFixtures/src/main/resources/{ => data}/ul_logs.csv | 0 .../src/test/java/org/elasticsearch/xpack/esql/CsvTests.java | 2 +- 36 files changed, 2 insertions(+), 2 deletions(-) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/addresses.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/ages.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/airport_city_boundaries.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/airports.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/airports_mp.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/airports_web.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/alerts.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/apps.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/books.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/cartesian_multipolygons.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/client_cidr.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/clientips.csv (100%) rename 
x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/countries_bbox.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/countries_bbox_web.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/date_nanos.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/decades.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/distances.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/employees.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/employees_incompatible.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/heights.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/hosts.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/k8s.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/languages.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/languages_non_unique_key.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/message_types.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/missing_ip_sample_data.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/multivalue_geometries.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/multivalue_points.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/mv_sample_data.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/sample_data.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/sample_data_ts_long.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/sample_data_ts_nanos.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/semantic_text.csv (100%) rename x-pack/plugin/esql/qa/testFixtures/src/main/resources/{ => data}/ul_logs.csv (100%) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 8e81d14b4dfd7..1d2de407219ee 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -359,7 +359,7 @@ private static void load(RestClient client, TestsDataset dataset, Logger logger, if (mapping == null) { throw new IllegalArgumentException("Cannot find resource " + mappingName); } - final String dataName = "/" + dataset.dataFileName; + final String dataName = "/data/" + dataset.dataFileName; URL data = CsvTestsDataLoader.class.getResource(dataName); if (data == null) { throw new IllegalArgumentException("Cannot find resource " + dataName); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/addresses.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/addresses.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ages.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ages.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airport_city_boundaries.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airport_city_boundaries.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_mp.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_mp.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_mp.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_mp.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_web.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_web.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_web.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_web.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/alerts.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/alerts.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/alerts.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/alerts.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/apps.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/apps.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/apps.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/apps.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/cartesian_multipolygons.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/cartesian_multipolygons.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/client_cidr.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/client_cidr.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips.csv 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/clientips.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/clientips.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox_web.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox_web.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/date_nanos.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/date_nanos.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/decades.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/decades.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/distances.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/distances.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees_incompatible.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees_incompatible.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees_incompatible.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees_incompatible.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/heights.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/heights.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hosts.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/hosts.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/hosts.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/hosts.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/k8s.csv 
similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/k8s.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_non_unique_key.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_non_unique_key.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_non_unique_key.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_non_unique_key.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/message_types.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/message_types.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/missing_ip_sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/missing_ip_sample_data.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/missing_ip_sample_data.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/missing_ip_sample_data.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_geometries.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_geometries.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_geometries.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_geometries.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_points.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_points.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_points.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_points.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/mv_sample_data.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/mv_sample_data.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_long.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_long.csv diff --git 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_nanos.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_nanos.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/semantic_text.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/semantic_text.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ul_logs.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ul_logs.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/ul_logs.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ul_logs.csv diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index e627f99322f08..1e0374c648579 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -426,7 +426,7 @@ private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) } private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDataLoader.TestsDataset dataset) throws Exception { - var testData = loadPageFromCsv(CsvTests.class.getResource("/" + dataset.dataFileName()), dataset.typeMapping()); + var testData = loadPageFromCsv(CsvTests.class.getResource("/data/" + dataset.dataFileName()), dataset.typeMapping()); return new TestPhysicalOperationProviders(testData.v1(), testData.v2()); } From 9cc6cd422912b253bb4410452c333609c4cefc7c Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Thu, 19 Dec 2024 10:22:13 +0100 Subject: [PATCH 28/62] ESQL: Fix attribute set equals (#118823) Also add a test that uses this, for lookup join field attribute ids. 
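The gist of the fix, as the AttributeSet hunk below shows: `equals` used to delegate straight to the backing collection, so comparing one `AttributeSet` with another compared the delegate against the wrapper object and always failed. The corrected method unwraps the argument first. A sketch of that method in isolation (`delegate` is the set's backing collection, per the class shown in the diff):

```java
@Override
public boolean equals(Object obj) {
    // Unwrap AttributeSet arguments so we compare delegate to delegate;
    // without this, set-vs-set comparisons could never be equal.
    if (obj instanceof AttributeSet as) {
        obj = as.delegate;
    }
    return delegate.equals(obj);
}
```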
--- docs/changelog/118823.yaml | 5 +++ .../esql/core/expression/AttributeSet.java | 8 +++- .../core/expression/AttributeMapTests.java | 2 +- .../core/expression/AttributeSetTests.java | 42 +++++++++++++++++++ .../xpack/esql/analysis/AnalyzerTests.java | 31 ++++++++++++++ 5 files changed, 85 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/118823.yaml create mode 100644 x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java diff --git a/docs/changelog/118823.yaml b/docs/changelog/118823.yaml new file mode 100644 index 0000000000000..b1afe1c873c17 --- /dev/null +++ b/docs/changelog/118823.yaml @@ -0,0 +1,5 @@ +pr: 118823 +summary: Fix attribute set equals +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java index a092e17931237..8a075e8887512 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java @@ -174,8 +174,12 @@ public Stream parallelStream() { } @Override - public boolean equals(Object o) { - return delegate.equals(o); + public boolean equals(Object obj) { + if (obj instanceof AttributeSet as) { + obj = as.delegate; + } + + return delegate.equals(obj); } @Override diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java index 511c7f4b1d2f8..ade79c8168076 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java @@ -30,7 +30,7 @@ public class AttributeMapTests extends ESTestCase { - private static Attribute a(String name) { + static Attribute a(String name) { return new UnresolvedAttribute(Source.EMPTY, name); } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java new file mode 100644 index 0000000000000..0e97773fb90d2 --- /dev/null +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.esql.core.expression; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.AttributeMapTests.a; + +public class AttributeSetTests extends ESTestCase { + + public void testEquals() { + Attribute a1 = a("1"); + Attribute a2 = a("2"); + + AttributeSet first = new AttributeSet(List.of(a1, a2)); + assertEquals(first, first); + + AttributeSet second = new AttributeSet(); + second.add(a1); + second.add(a2); + + assertEquals(first, second); + assertEquals(second, first); + + AttributeSet third = new AttributeSet(); + third.add(a("1")); + third.add(a("2")); + + assertNotEquals(first, third); + assertNotEquals(third, first); + + assertEquals(AttributeSet.EMPTY, AttributeSet.EMPTY); + assertEquals(AttributeSet.EMPTY, first.intersect(third)); + assertEquals(third.intersect(first), AttributeSet.EMPTY); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 5d1ff43dfe31b..674eda8916c5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; @@ -2190,6 +2191,36 @@ public void testLookupJoinUnknownField() { assertThat(e.getMessage(), containsString(errorMessage3 + "right side of join")); } + public void testMultipleLookupJoinsGiveDifferentAttributes() { + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + + // The field attributes that get contributed by different LOOKUP JOIN commands must have different name ids, + // even if they have the same names. Otherwise, things like dependency analysis - like in PruneColumns - cannot work based on + // name ids and shadowing semantics proliferate into all kinds of optimizer code. 
+ + String query = "FROM test" + + "| EVAL language_code = languages" + + "| LOOKUP JOIN languages_lookup ON language_code" + + "| LOOKUP JOIN languages_lookup ON language_code"; + LogicalPlan analyzedPlan = analyze(query); + + List lookupFields = new ArrayList<>(); + List> lookupFieldNames = new ArrayList<>(); + analyzedPlan.forEachUp(EsRelation.class, esRelation -> { + if (esRelation.indexMode() == IndexMode.LOOKUP) { + lookupFields.add(esRelation.outputSet()); + lookupFieldNames.add(esRelation.outputSet().stream().map(NamedExpression::name).collect(Collectors.toSet())); + } + }); + + assertEquals(lookupFieldNames.size(), 2); + assertEquals(lookupFieldNames.get(0), lookupFieldNames.get(1)); + + assertEquals(lookupFields.size(), 2); + AttributeSet intersection = lookupFields.get(0).intersect(lookupFields.get(1)); + assertEquals(AttributeSet.EMPTY, intersection); + } + public void testLookupJoinIndexMode() { assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); From 8845cf725bf7412c7d53cfaac261026f95d78601 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 10:32:47 +0100 Subject: [PATCH 29/62] Include read-only index versions in randomCompatbileVersion/randomCompatiblePreviousVersion (#119013) Read-only versions are now included in all index versions, see #118793 . The next step is to broaden testing where possible to include index versions that cannot be written to. To do that, we change the behaviour of the existing randomCompatbileVersion and randomCompatiblePreviousVersion and methods to include read-only versions and we introduce corresponding variants of these methods that only return write compatible index versions. As part of this change, we can also remove some of the `@UpdateForV9` annotations that relate to v7 index versions which randomCompatibleVersion no longer covered. That's now fixed and such tests can simply be restored. 
--- .../search/GeoBoundingBoxQueryLegacyGeoShapeIT.java | 2 +- .../legacygeo/search/LegacyGeoShapeIT.java | 2 +- .../legacygeo/GeoJsonShapeParserTests.java | 3 --- .../legacygeo/GeoWKTShapeParserTests.java | 7 ------- .../mapper/LegacyGeoShapeFieldMapperTests.java | 4 ---- .../legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java | 4 ---- .../action/admin/indices/create/CloneIndexIT.java | 2 +- .../action/admin/indices/create/SplitIndexIT.java | 4 ++-- .../seqno/PeerRecoveryRetentionLeaseCreationIT.java | 2 +- .../search/aggregations/bucket/GeoDistanceIT.java | 2 +- .../search/aggregations/bucket/GeoHashGridIT.java | 2 +- .../search/functionscore/DecayFunctionScoreIT.java | 2 +- .../search/geo/GeoBoundingBoxQueryGeoPointIT.java | 2 +- .../org/elasticsearch/search/geo/GeoDistanceIT.java | 2 +- .../org/elasticsearch/search/geo/GeoPolygonIT.java | 2 +- .../org/elasticsearch/search/sort/GeoDistanceIT.java | 8 ++++---- .../search/sort/GeoDistanceSortBuilderIT.java | 6 +++--- .../cluster/metadata/IndexMetadataVerifierTests.java | 2 +- .../replication/RetentionLeasesReplicationTests.java | 2 +- .../test/test/InternalClusterForbiddenSettingIT.java | 4 ++-- .../elasticsearch/test/index/IndexVersionUtils.java | 10 ++++++++++ .../xpack/spatial/search/CartesianShapeIT.java | 2 +- .../GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java | 2 +- .../xpack/spatial/search/GeoShapeWithDocValuesIT.java | 2 +- .../mapper/GeoShapeWithDocValuesFieldMapperTests.java | 3 --- 25 files changed, 36 insertions(+), 47 deletions(-) diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java index 37c31c8af47b0..d2dd5b7442dd2 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java @@ -45,6 +45,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java index 918c343b79b7b..73b7c07c45fe5 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java @@ -41,7 +41,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 9b83cd9ffdb2b..bd5b289abc588 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ 
b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; @@ -344,8 +343,6 @@ public void testParsePolygon() throws IOException, ParseException { assertGeometryEquals(p, polygonGeoJson, false); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParse3DPolygon() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java index 5d0df9215ef25..f944a368b2a6c 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.MultiLine; @@ -303,8 +302,6 @@ public void testParseMixedDimensionPolyWithHole() throws IOException, ParseExcep assertThat(e, hasToString(containsString("coordinate dimensions do not match"))); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); @@ -338,8 +335,6 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParsePolyWithStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0, 0)); @@ -363,8 +358,6 @@ public void testParsePolyWithStoredZ() throws IOException { assertEquals(shapeBuilder.numDimensions(), 3); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseOpenPolygon() throws IOException { String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))"; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java index 7352b4d88a42b..c97b0a28d22de 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java +++ 
b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; @@ -21,7 +20,6 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -56,8 +54,6 @@ import static org.mockito.Mockito.when; @SuppressWarnings("deprecation") -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldMapperTests extends MapperTestCase { @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java index bf616c8190324..f5e09f19c1a71 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java @@ -8,9 +8,7 @@ */ package org.elasticsearch.legacygeo.mapper; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.FieldTypeTestCase; @@ -23,8 +21,6 @@ import java.util.List; import java.util.Map; -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldTypeTests extends FieldTypeTestCase { /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index 47f96aebacd7d..fa2b053ead348 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -39,7 +39,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testCreateCloneIndex() { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); int numPrimaryShards = randomIntBetween(1, 5); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 8391ab270b1d1..9ba6ac4bd9c58 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -341,8 +341,8 @@ private static IndexMetadata indexMetadata(final Client client, final String ind return clusterStateResponse.getState().metadata().index(index); } - public void testCreateSplitIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + public void testCreateSplitIndex() { + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) ).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java index 07f9d9ee7b6c3..92e5eb8e046bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java @@ -48,7 +48,7 @@ public void testCanRecoverFromStoreWithoutPeerRecoveryRetentionLease() throws Ex Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleWriteVersion(random())) ) ); ensureGreen(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 907f943e68422..6c67bd2a98606 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -56,7 +56,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } - private final IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + private final IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); private IndexRequestBuilder indexCity(String idx, String name, String... 
latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 1ad7d1a11bea7..1de51d6df8197 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -49,7 +49,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } - private final IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + private final IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); static Map expectedDocCountsForGeoHash = null; static Map multiValuedExpectedDocCountsForGeoHash = null; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index 9988624f6a677..a55edf3782bcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -748,7 +748,7 @@ public void testDateWithoutOrigin() throws Exception { } public void testManyDocsLin() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = jsonBuilder().startObject() .startObject("_doc") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java index 2489889be19e5..8104e4ed7a825 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java @@ -32,6 +32,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 9b4e28055a988..a309fa81f6dc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -96,7 +96,7 @@ protected boolean forbidPrivateIndexSettings() { @Before public void setupTestIndex() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java index 4b8f29f3cc9a5..aadefd9bd8018 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java @@ -39,7 +39,7 @@ protected boolean forbidPrivateIndexSettings() { @Override protected void setupSuiteScopeCluster() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java index e80678c4f5fc6..f55d4505f3f58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -45,7 +45,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testDistanceSortingMVFields() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -237,7 +237,7 @@ public void testDistanceSortingMVFields() throws Exception { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -299,7 +299,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { } public void testDistanceSortingNestedFields() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -551,7 +551,7 @@ public void testDistanceSortingNestedFields() throws Exception { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; double lon = -73.998776; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index aabca1b9333f8..d53c90a5d1e28 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -60,7 +60,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce * |___________________________ * 1 2 3 4 5 6 7 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -152,7 +152,7 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -225,7 +225,7 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept * |______________________ * 1 2 3 4 5 6 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? 
IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 3b122864aa472..6ee86470861b4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -97,7 +97,7 @@ public void testCustomSimilarity() { .put("index.similarity.my_similarity.after_effect", "l") .build() ); - service.verifyIndexMetadata(src, IndexVersions.MINIMUM_COMPATIBLE); + service.verifyIndexMetadata(src, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } public void testIncompatibleVersion() { diff --git a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java index 8f45a15c73bb6..1f82d7998257e 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java @@ -147,7 +147,7 @@ protected void syncRetentionLeases(ShardId id, RetentionLeases leases, ActionLis public void testTurnOffTranslogRetentionAfterAllShardStarted() throws Exception { final Settings.Builder settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); if (randomBoolean()) { - settings.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleVersion(random())); + settings.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleWriteVersion(random())); } try (ReplicationGroup group = createGroup(between(1, 2), settings.build())) { group.startAll(); diff --git a/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java b/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java index d13450fbb52dd..2033743354f34 100644 --- a/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java +++ b/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java @@ -26,7 +26,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testRestart() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomPreviousCompatibleWriteVersion(random(), IndexVersion.current()); // create / delete an index with forbidden setting prepareCreate("test").setSettings(settings(version).build()).get(); indicesAdmin().prepareDelete("test").get(); @@ -38,7 +38,7 @@ public void testRestart() throws Exception { } public void testRollingRestart() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomPreviousCompatibleWriteVersion(random(), IndexVersion.current()); // create / delete an index with forbidden setting 
prepareCreate("test").setSettings(settings(version).build()).get(); indicesAdmin().prepareDelete("test").get(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java index 592cffac33552..667149e4bdd3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java @@ -122,11 +122,21 @@ public static IndexVersion getNextVersion(IndexVersion version) { /** Returns a random {@code IndexVersion} that is compatible with {@link IndexVersion#current()} */ public static IndexVersion randomCompatibleVersion(Random random) { + return randomVersionBetween(random, IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()); + } + + /** Returns a random {@code IndexVersion} that is compatible with {@link IndexVersion#current()} and can be written to */ + public static IndexVersion randomCompatibleWriteVersion(Random random) { return randomVersionBetween(random, IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); } /** Returns a random {@code IndexVersion} that is compatible with the previous version to {@code version} */ public static IndexVersion randomPreviousCompatibleVersion(Random random, IndexVersion version) { + return randomVersionBetween(random, IndexVersions.MINIMUM_READONLY_COMPATIBLE, getPreviousVersion(version)); + } + + /** Returns a random {@code IndexVersion} that is compatible with the previous version to {@code version} and can be written to */ + public static IndexVersion randomPreviousCompatibleWriteVersion(Random random, IndexVersion version) { return randomVersionBetween(random, IndexVersions.MINIMUM_COMPATIBLE, getPreviousVersion(version)); } } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java index 83fbd7262461d..eb4515a897118 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java @@ -36,7 +36,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java index 3d91fb443aabd..4a6fa5d545bef 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java @@ -40,6 +40,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git 
a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java index b4d7a472591bd..0857b078be579 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java @@ -60,7 +60,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 4b13a7bf1f829..58fde288cfc60 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.WellKnownBinary; @@ -280,8 +279,6 @@ public void testInvalidCurrentVersion() { ); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public void testGeoShapeLegacyMerge() throws Exception { IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); MapperService m = createMapperService(version, fieldMapping(b -> b.field("type", getFieldName()))); From 1cf5b03b31716a5424a60a75aeade6d7379ca761 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 19 Dec 2024 21:08:51 +1100 Subject: [PATCH 30/62] Mute org.elasticsearch.cluster.coordination.NodeJoinExecutorTests testSuccess #119052 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 12f1fc510a332..8cfc7c082473f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -301,6 +301,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116777 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 +- class: org.elasticsearch.cluster.coordination.NodeJoinExecutorTests + method: testSuccess + issue: https://github.com/elastic/elasticsearch/issues/119052 # Examples: # From 4efeca83b4a9f40f5a8fac9a8a3b4f2191d5282e Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 19 Dec 2024 12:20:12 +0200 Subject: [PATCH 31/62] [Failure store] Reconciliate failure indices during snapshotting (#118834) In this PR we reconciliate the failure indices of a data stream just like we do for the backing indices. 
The only difference is that a data stream can have an empty list of failure indices, while it cannot have an empty list of backing indices. An easy way to create a situation where certain backing or failure indices are not included in a snapshot is by using exclusions in the multi-target expression of the snapshot. For example: ``` PUT /_snapshot/my_repository/my-snapshot?wait_for_completion=true { "indices": ["my-ds*", "-.fs-my-ds-000001"] } ``` --- .../datastreams/DataStreamsSnapshotsIT.java | 279 +++++++++--------- .../cluster/metadata/DataStream.java | 49 ++- .../snapshots/SnapshotsService.java | 10 +- .../cluster/metadata/DataStreamTests.java | 66 ++++- 4 files changed, 224 insertions(+), 180 deletions(-) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 286ad68896797..32d080ccc46b1 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -60,7 +60,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -77,6 +76,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase { @@ -145,18 +145,11 @@ public void setup() throws Exception { // Resolve backing index names after data streams have been created: // (these names have a date component, and running around midnight could lead to test failures otherwise) - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); - GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); - dsBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); - otherDsBackingIndexName = getDataStreamResponse.getDataStreams().get(1).getDataStream().getIndices().get(0).getName(); - fsBackingIndexName = getDataStreamResponse.getDataStreams().get(2).getDataStream().getIndices().get(0).getName(); - fsFailureIndexName = getDataStreamResponse.getDataStreams() .get(2) .getDataStream() .getFailureIndices() .getIndices() .get(0) .getName(); + List dataStreamInfos = getDataStreamInfo("*"); + dsBackingIndexName = dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(); + otherDsBackingIndexName = dataStreamInfos.get(1).getDataStream().getIndices().get(0).getName(); + fsBackingIndexName = dataStreamInfos.get(2).getDataStream().getIndices().get(0).getName(); + fsFailureIndexName = dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().get(0).getName(); // Will be used in some tests, to test renaming while restoring a snapshot: ds2BackingIndexName = dsBackingIndexName.replace("-ds-", "-ds2-"); @@ -198,9 +191,7 @@ public void testSnapshotAndRestore() throws Exception { assertEquals(Collections.singletonList(dsBackingIndexName), getSnapshot(REPO,
SNAPSHOT).indices()); - assertAcked( - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" })) - ); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "ds"))); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() @@ -218,13 +209,10 @@ public void testSnapshotAndRestore() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List ds = getDataStreamInfo("ds"); + assertEquals(1, ds.size()); + assertEquals(1, ds.get(0).getDataStream().getIndices().size()); + assertEquals(dsBackingIndexName, ds.get(0).getDataStream().getIndices().get(0).getName()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds")); @@ -278,19 +266,18 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); - GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).get(); + List dataStreamInfos = getDataStreamInfo("*"); assertThat( - ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), + dataStreamInfos.stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), contains(equalTo("ds"), equalTo("other-ds"), equalTo("with-fs")) ); - List backingIndices = ds.getDataStreams().get(0).getDataStream().getIndices(); + List backingIndices = dataStreamInfos.get(0).getDataStream().getIndices(); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(dsBackingIndexName)); - backingIndices = ds.getDataStreams().get(1).getDataStream().getIndices(); + backingIndices = dataStreamInfos.get(1).getDataStream().getIndices(); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(otherDsBackingIndexName)); - backingIndices = ds.getDataStreams().get(2).getDataStream().getIndices(); + backingIndices = dataStreamInfos.get(2).getDataStream().getIndices(); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(fsBackingIndexName)); - List failureIndices = ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices(); + List failureIndices = dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices(); assertThat(failureIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(fsFailureIndexName)); } @@ -337,14 +324,10 @@ public void testSnapshotAndRestoreInPlace() { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); - GetDataStreamAction.Response ds = 
client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).actionGet(); - assertThat( - ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), - contains(equalTo("ds")) - ); - List backingIndices = ds.getDataStreams().get(0).getDataStream().getIndices(); - assertThat(ds.getDataStreams().get(0).getDataStream().getIndices(), hasSize(1)); + List dsInfo = getDataStreamInfo("ds"); + assertThat(dsInfo.stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), contains(equalTo("ds"))); + List backingIndices = dsInfo.get(0).getDataStream().getIndices(); + assertThat(dsInfo.get(0).getDataStream().getIndices(), hasSize(1)); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(equalTo(dsBackingIndexName))); // The backing index created as part of rollover should still exist (but just not part of the data stream) @@ -357,39 +340,40 @@ public void testSnapshotAndRestoreInPlace() { } public void testFailureStoreSnapshotAndRestore() throws Exception { + String dataStreamName = "with-fs"; CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices("with-fs") + .setIndices(dataStreamName) .setIncludeGlobalState(false) .get(); RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); + assertThat(getSnapshot(REPO, SNAPSHOT).dataStreams(), containsInAnyOrder(dataStreamName)); assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(fsBackingIndexName, fsFailureIndexName)); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "with-fs"))); + assertAcked( + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, dataStreamName)) + ); { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices("with-fs") + .setIndices(dataStreamName) .get(); assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "with-fs" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(fsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(fsFailureIndexName, ds.getDataStreams().get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo(dataStreamName); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(fsBackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(fsFailureIndexName, dataStreamInfos.get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); } { // With rename pattern @@ -397,21 +381,18 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { .cluster() .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices("with-fs") + .setIndices(dataStreamName) .setRenamePattern("-fs") .setRenameReplacement("-fs2") .get(); 
assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "with-fs2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(fs2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(fs2FailureIndexName, ds.getDataStreams().get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("with-fs2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(fs2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(fs2FailureIndexName, dataStreamInfos.get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); } } @@ -477,13 +458,10 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamToSnapshot }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(backingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo(dataStreamToSnapshot); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(backingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), contains(dataStreamToSnapshot)); @@ -536,13 +514,10 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) - ).get(); - assertEquals(3, ds.getDataStreams().size()); + List dataStreamInfos = getDataStreamInfo("*"); + assertEquals(3, dataStreamInfos.size()); assertThat( - ds.getDataStreams().stream().map(i -> i.getDataStream().getName()).collect(Collectors.toList()), + dataStreamInfos.stream().map(i -> i.getDataStream().getName()).collect(Collectors.toList()), containsInAnyOrder("ds", "other-ds", "with-fs") ); @@ -596,19 +571,16 @@ public void testSnapshotAndRestoreAll() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) - ).get(); - assertEquals(3, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(1).getDataStream().getIndices().size()); - assertEquals(otherDsBackingIndexName, 
ds.getDataStreams().get(1).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size()); - assertEquals(fsBackingIndexName, ds.getDataStreams().get(2).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().size()); - assertEquals(fsFailureIndexName, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("*"); + assertEquals(3, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(dsBackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(1).getDataStream().getIndices().size()); + assertEquals(otherDsBackingIndexName, dataStreamInfos.get(1).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getIndices().size()); + assertEquals(fsBackingIndexName, dataStreamInfos.get(2).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().size()); + assertEquals(fsFailureIndexName, dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds")); @@ -667,19 +639,16 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) - ).get(); - assertEquals(3, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(1).getDataStream().getIndices().size()); - assertEquals(otherDsBackingIndexName, ds.getDataStreams().get(1).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size()); - assertEquals(fsBackingIndexName, ds.getDataStreams().get(2).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size()); - assertEquals(fsFailureIndexName, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("*"); + assertEquals(3, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(dsBackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(1).getDataStream().getIndices().size()); + assertEquals(otherDsBackingIndexName, dataStreamInfos.get(1).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getIndices().size()); + assertEquals(fsBackingIndexName, dataStreamInfos.get(2).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getIndices().size()); + assertEquals(fsFailureIndexName, 
dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("*")).actionGet(); assertThat(getAliasesResponse.getDataStreamAliases(), anEmptyMap()); @@ -721,13 +690,10 @@ public void testRename() throws Exception { .setRenameReplacement("ds2") .get(); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(ds2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("ds2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(ds2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); assertResponse( client.prepareSearch("ds2"), response -> assertEquals(DOCUMENT_SOURCE, response.getHits().getHits()[0].getSourceAsMap()) @@ -779,13 +745,10 @@ public void testRenameWriteDataStream() throws Exception { .setRenameReplacement("other-ds2") .get(); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "other-ds2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(otherDs2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("other-ds2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(otherDs2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds", "other-ds2")); @@ -849,9 +812,8 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() { assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK)); - GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); - GetDataStreamAction.Response response = client.execute(GetDataStreamAction.INSTANCE, getDSRequest).actionGet(); - assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); + List dataStreamInfos = getDataStreamInfo("ds"); + assertThat(dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); } public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { @@ -888,17 +850,15 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK)); // assert "ds" was restored as "test-ds" and the backing index has a valid name - GetDataStreamAction.Request getRenamedDS = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "test-ds" }); - GetDataStreamAction.Response response = client.execute(GetDataStreamAction.INSTANCE, getRenamedDS).actionGet(); + List dataStreamInfos = 
getDataStreamInfo("test-ds"); assertThat( - response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), + dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(), is(DataStream.getDefaultBackingIndexName("test-ds", 1L)) ); // data stream "ds" should still exist in the system - GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); - response = client.execute(GetDataStreamAction.INSTANCE, getDSRequest).actionGet(); - assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); + dataStreamInfos = getDataStreamInfo("ds"); + assertThat(dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); } public void testWildcards() throws Exception { @@ -924,16 +884,13 @@ public void testWildcards() throws Exception { assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(ds2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("ds2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(ds2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); assertThat( "we renamed the restored data stream to one that doesn't match any existing composable template", - ds.getDataStreams().get(0).getIndexTemplate(), + dataStreamInfos.get(0).getIndexTemplate(), is(nullValue()) ); } @@ -955,7 +912,7 @@ public void testDataStreamNotStoredWhenIndexRequested() { ); } - public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { + public void testDataStreamNotRestoredWhenIndexRequested() { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2") @@ -984,7 +941,7 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { expectThrows(ResourceNotFoundException.class, client.execute(GetDataStreamAction.INSTANCE, getRequest)); } - public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionException, InterruptedException { + public void testDataStreamNotIncludedInLimitedSnapshot() { final String snapshotName = "test-snap"; CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() @@ -1042,12 +999,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { assertDocCount(dataStream, 100L); // Resolve backing index name after the data stream has been created because it has a date component, // and running around midnight could lead to test failures otherwise - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( - TEST_REQUEST_TIMEOUT, - new String[] { dataStream } - ); - GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); - String backingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); + String backingIndexName = getDataStreamInfo(dataStream).get(0).getDataStream().getIndices().get(0).getName(); 
logger.info("--> snapshot"); ActionFuture future = client1.admin() @@ -1235,7 +1187,7 @@ public void testSnapshotDSDuringRolloverAndDeleteOldIndex() throws Exception { assertEquals(restoreSnapshotResponse.failedShards(), 0); } - public void testExcludeDSFromSnapshotWhenExcludingItsIndices() { + public void testExcludeDSFromSnapshotWhenExcludingAnyOfItsIndices() { final String snapshot = "test-snapshot"; final String indexWithoutDataStream = "test-idx-no-ds"; createIndexWithContent(indexWithoutDataStream); @@ -1251,10 +1203,47 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndices() { .getRestoreInfo(); assertThat(restoreInfo.failedShards(), is(0)); assertThat(restoreInfo.successfulShards(), is(1)); + + // Exclude only failure store indices + { + String dataStreamName = "with-fs"; + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + .setIndices(dataStreamName + "*", "-.fs*") + .setIncludeGlobalState(false) + .get(); + + RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); + assertEquals(RestStatus.OK, status); + + SnapshotInfo retrievedSnapshot = getSnapshot(REPO, SNAPSHOT); + assertThat(retrievedSnapshot.dataStreams(), contains(dataStreamName)); + assertThat(retrievedSnapshot.indices(), containsInAnyOrder(fsBackingIndexName)); + + assertAcked( + safeGet(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*"))) + ); + + RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + .setIndices(dataStreamName) + .get() + .getRestoreInfo(); + + assertThat(restoreSnapshotResponse, notNullValue()); + assertThat(restoreSnapshotResponse.successfulShards(), equalTo(restoreSnapshotResponse.totalShards())); + assertThat(restoreSnapshotResponse.failedShards(), is(0)); + + GetDataStreamAction.Response.DataStreamInfo dataStream = getDataStreamInfo(dataStreamName).getFirst(); + assertThat(dataStream.getDataStream().getBackingIndices().getIndices(), not(empty())); + assertThat(dataStream.getDataStream().getFailureIndices().getIndices(), empty()); + } } /** - * This test is a copy of the {@link #testExcludeDSFromSnapshotWhenExcludingItsIndices()} the only difference + * This test is a copy of the {@link #testExcludeDSFromSnapshotWhenExcludingAnyOfItsIndices()} ()} the only difference * is that one include the global state and one doesn't. In general this shouldn't matter that's why it used to be * a random parameter of the test, but because of #107515 it fails when we include the global state. Keep them * separate until this is fixed. 
@@ -1284,10 +1273,7 @@ public void testRestoreSnapshotFully() throws Exception { createIndexWithContent(indexName); createFullSnapshot(REPO, snapshotName); - assertAcked( - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" })) - .get() - ); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.lenientExpandOpenHidden()).get()); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -1297,8 +1283,7 @@ public void testRestoreSnapshotFully() throws Exception { .get(); assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); - GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); - assertThat(client.execute(GetDataStreamAction.INSTANCE, getRequest).get().getDataStreams(), hasSize(3)); + assertThat(getDataStreamInfo("*"), hasSize(3)); assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get()); } @@ -1326,7 +1311,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti } } - public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Exception { + public void testRestoreDataStreamAliasWithConflictingIndicesAlias() { var snapshotName = "test-snapshot"; createFullSnapshot(REPO, snapshotName); client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet(); @@ -1484,4 +1469,8 @@ public void testWarningHeaderOnRestoreTemplateFromSnapshot() throws Exception { } + protected List getDataStreamInfo(String... dataStreamNames) { + GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, dataStreamNames); + return safeGet(client.execute(GetDataStreamAction.INSTANCE, getRequest)).getDataStreams(); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 7745ec9cc75b2..db602ef6ef291 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -49,7 +49,6 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.ArrayList; -import java.util.Collection; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -794,27 +793,57 @@ public DataStream promoteDataStream() { /** * Reconciles this data stream with a list of indices available in a snapshot. Allows snapshots to store accurate data - * stream definitions that do not reference backing indices not contained in the snapshot. + * stream definitions that do not reference backing indices and failure indices not contained in the snapshot. 
* * @param indicesInSnapshot List of indices in the snapshot + * @param snapshotMetadataBuilder a metadata builder with the current view of the snapshot metadata * @return Reconciled {@link DataStream} instance or {@code null} if no reconciled version of this data stream could be built from the * given indices */ @Nullable - public DataStream snapshot(Collection indicesInSnapshot) { + public DataStream snapshot(Set indicesInSnapshot, Metadata.Builder snapshotMetadataBuilder) { + boolean backingIndicesChanged = false; + boolean failureIndicesChanged = false; + // do not include indices not available in the snapshot - List reconciledIndices = new ArrayList<>(this.backingIndices.indices); - if (reconciledIndices.removeIf(x -> indicesInSnapshot.contains(x.getName()) == false) == false) { + List reconciledBackingIndices = this.backingIndices.indices; + if (isAnyIndexMissing(this.backingIndices.getIndices(), snapshotMetadataBuilder, indicesInSnapshot)) { + reconciledBackingIndices = new ArrayList<>(this.backingIndices.indices); + backingIndicesChanged = reconciledBackingIndices.removeIf(x -> indicesInSnapshot.contains(x.getName()) == false); + if (reconciledBackingIndices.isEmpty()) { + return null; + } + } + + List reconciledFailureIndices = this.failureIndices.indices; + if (DataStream.isFailureStoreFeatureFlagEnabled() + && isAnyIndexMissing(failureIndices.indices, snapshotMetadataBuilder, indicesInSnapshot)) { + reconciledFailureIndices = new ArrayList<>(this.failureIndices.indices); + failureIndicesChanged = reconciledFailureIndices.removeIf(x -> indicesInSnapshot.contains(x.getName()) == false); + } + + if (backingIndicesChanged == false && failureIndicesChanged == false) { return this; } - if (reconciledIndices.size() == 0) { - return null; + Builder builder = copy(); + if (backingIndicesChanged) { + builder.setBackingIndices(backingIndices.copy().setIndices(reconciledBackingIndices).build()); + } + if (failureIndicesChanged) { + builder.setFailureIndices(failureIndices.copy().setIndices(reconciledFailureIndices).build()); } + return builder.setMetadata(metadata == null ? null : new HashMap<>(metadata)).build(); + } - return copy().setBackingIndices(backingIndices.copy().setIndices(reconciledIndices).build()) - .setMetadata(metadata == null ? null : new HashMap<>(metadata)) - .build(); + private static boolean isAnyIndexMissing(List indices, Metadata.Builder builder, Set indicesInSnapshot) { + for (Index index : indices) { + final String indexName = index.getName(); + if (builder.get(indexName) == null || indicesInSnapshot.contains(indexName) == false) { + return true; + } + } + return false; } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 8d526f3e114e1..6f690a9e6ccd5 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -786,15 +786,7 @@ private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, assert snapshot.partial() : "Data stream [" + dataStreamName + "] was deleted during a snapshot but snapshot was not partial."; } else { - boolean missingIndex = false; - for (Index index : dataStream.getIndices()) { - final String indexName = index.getName(); - if (builder.get(indexName) == null || indicesInSnapshot.contains(indexName) == false) { - missingIndex = true; - break; - } - } - final DataStream reconciled = missingIndex ? 
dataStream.snapshot(indicesInSnapshot) : dataStream; + final DataStream reconciled = dataStream.snapshot(indicesInSnapshot, builder); if (reconciled != null) { dataStreams.put(dataStreamName, reconciled); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 7108a4fd4f19e..cfdcfe48c8d9a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -45,8 +45,10 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; @@ -866,23 +868,39 @@ public void testReplaceFailureIndexThrowsExceptionIfReplacingWriteIndex() { } public void testSnapshot() { - var preSnapshotDataStream = DataStreamTestHelper.randomInstance(); - var indicesToRemove = randomSubsetOf(preSnapshotDataStream.getIndices()); - if (indicesToRemove.size() == preSnapshotDataStream.getIndices().size()) { + var preSnapshotDataStream = DataStreamTestHelper.randomInstance(true); + + // Mutate backing indices + var backingIndicesToRemove = randomSubsetOf(preSnapshotDataStream.getIndices()); + if (backingIndicesToRemove.size() == preSnapshotDataStream.getIndices().size()) { // never remove them all - indicesToRemove.remove(0); + backingIndicesToRemove.remove(0); } - var indicesToAdd = randomIndexInstances(); - var postSnapshotIndices = new ArrayList<>(preSnapshotDataStream.getIndices()); - postSnapshotIndices.removeAll(indicesToRemove); - postSnapshotIndices.addAll(indicesToAdd); + var backingIndicesToAdd = randomIndexInstances(); + var postSnapshotBackingIndices = new ArrayList<>(preSnapshotDataStream.getIndices()); + postSnapshotBackingIndices.removeAll(backingIndicesToRemove); + postSnapshotBackingIndices.addAll(backingIndicesToAdd); + + // Mutate failure indices + var failureIndicesToRemove = randomSubsetOf(preSnapshotDataStream.getFailureIndices().getIndices()); + var failureIndicesToAdd = randomIndexInstances(); + var postSnapshotFailureIndices = new ArrayList<>(preSnapshotDataStream.getFailureIndices().getIndices()); + postSnapshotFailureIndices.removeAll(failureIndicesToRemove); + postSnapshotFailureIndices.addAll(failureIndicesToAdd); var replicated = preSnapshotDataStream.isReplicated() && randomBoolean(); var postSnapshotDataStream = preSnapshotDataStream.copy() .setBackingIndices( preSnapshotDataStream.getBackingIndices() .copy() - .setIndices(postSnapshotIndices) + .setIndices(postSnapshotBackingIndices) + .setRolloverOnWrite(replicated == false && preSnapshotDataStream.rolloverOnWrite()) + .build() + ) + .setFailureIndices( + preSnapshotDataStream.getFailureIndices() + .copy() + .setIndices(postSnapshotFailureIndices) .setRolloverOnWrite(replicated == false && preSnapshotDataStream.rolloverOnWrite()) .build() ) @@ -891,9 +909,10 @@ public void testSnapshot() { .setReplicated(replicated) .build(); - var reconciledDataStream = postSnapshotDataStream.snapshot( - preSnapshotDataStream.getIndices().stream().map(Index::getName).toList() - ); + Set indicesInSnapshot = new HashSet<>(); + preSnapshotDataStream.getIndices().forEach(index -> 
indicesInSnapshot.add(index.getName())); + preSnapshotDataStream.getFailureIndices().getIndices().forEach(index -> indicesInSnapshot.add(index.getName())); + var reconciledDataStream = postSnapshotDataStream.snapshot(indicesInSnapshot, Metadata.builder()); assertThat(reconciledDataStream.getName(), equalTo(postSnapshotDataStream.getName())); assertThat(reconciledDataStream.getGeneration(), equalTo(postSnapshotDataStream.getGeneration())); @@ -907,9 +926,19 @@ public void testSnapshot() { } assertThat(reconciledDataStream.isHidden(), equalTo(postSnapshotDataStream.isHidden())); assertThat(reconciledDataStream.isReplicated(), equalTo(postSnapshotDataStream.isReplicated())); - assertThat(reconciledDataStream.getIndices(), everyItem(not(in(indicesToRemove)))); - assertThat(reconciledDataStream.getIndices(), everyItem(not(in(indicesToAdd)))); - assertThat(reconciledDataStream.getIndices().size(), equalTo(preSnapshotDataStream.getIndices().size() - indicesToRemove.size())); + assertThat(reconciledDataStream.getIndices(), everyItem(not(in(backingIndicesToRemove)))); + assertThat(reconciledDataStream.getIndices(), everyItem(not(in(backingIndicesToAdd)))); + assertThat( + reconciledDataStream.getIndices().size(), + equalTo(preSnapshotDataStream.getIndices().size() - backingIndicesToRemove.size()) + ); + var reconciledFailureIndices = reconciledDataStream.getFailureIndices().getIndices(); + assertThat(reconciledFailureIndices, everyItem(not(in(failureIndicesToRemove)))); + assertThat(reconciledFailureIndices, everyItem(not(in(failureIndicesToAdd)))); + assertThat( + reconciledFailureIndices.size(), + equalTo(preSnapshotDataStream.getFailureIndices().getIndices().size() - failureIndicesToRemove.size()) + ); } public void testSnapshotWithAllBackingIndicesRemoved() { @@ -920,7 +949,12 @@ public void testSnapshotWithAllBackingIndicesRemoved() { .setBackingIndices(preSnapshotDataStream.getBackingIndices().copy().setIndices(indicesToAdd).build()) .build(); - assertNull(postSnapshotDataStream.snapshot(preSnapshotDataStream.getIndices().stream().map(Index::getName).toList())); + assertNull( + postSnapshotDataStream.snapshot( + preSnapshotDataStream.getIndices().stream().map(Index::getName).collect(Collectors.toSet()), + Metadata.builder() + ) + ); } public void testSelectTimeSeriesWriteIndex() { From 78bd9ec6f082e67199bd1df742c72325b2169e2c Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Thu, 19 Dec 2024 11:43:34 +0100 Subject: [PATCH 32/62] [DOCS] Updates SharePoint Online page (#118318) --- .../connectors-sharepoint-online.asciidoc | 105 ++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 02f598c16f63c..2680e3ff840a6 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -133,6 +133,58 @@ The application name will appear in the Title box. ---- +[discrete#es-connectors-sharepoint-online-sites-selected-permissions] +====== Granting `Sites.Selected` permissions + +To configure `Sites.Selected` permissions, follow these steps in the Azure Active Directory portal. These permissions enable precise access control to specific SharePoint sites. + +. Sign in to the https://portal.azure.com/[Azure Active Directory portal^]. +. Navigate to **App registrations** and locate the application created for the connector. +. 
Under **API permissions**, click **Add permission**. +. Select **Microsoft Graph** > **Application permissions**, then add `Sites.Selected`. +. Click **Grant admin consent** to approve the permission. + +[TIP] +==== +Refer to the official https://learn.microsoft.com/en-us/graph/permissions-reference[Microsoft documentation] for managing permissions in Azure AD. +==== + +To assign access to specific SharePoint sites using `Sites.Selected`: + +. Use Microsoft Graph Explorer or PowerShell to grant access. +. To fetch the site ID, run the following Graph API query: ++ +[source, http] +---- +GET https://graph.microsoft.com/v1.0/sites?select=webUrl,Title,Id&$search="*" +---- ++ +This will return the `id` of the site. + +. Use the `id` to assign read or write access: ++ +[source, http] +---- +POST https://graph.microsoft.com/v1.0/sites/<site-id>/permissions +{ + "roles": ["read"], // or "write" + "grantedToIdentities": [ + { + "application": { + "id": "<application-id>", + "displayName": "<application-name>" + } + } + ] +} +---- + +[NOTE] +==== +When using the `Comma-separated list of sites` configuration field, ensure the sites specified match those granted `Sites.Selected` permission in SharePoint. +If the `Comma-separated list of sites` field is set to `*` or the `Enumerate all sites` toggle is enabled, the connector will attempt to access all sites. This requires broader permissions, which are not supported with `Sites.Selected`. +==== + .Graph API permissions **** Microsoft recommends using Graph API for all operations with Sharepoint Online. Graph API is well-documented and more efficient at fetching data, which helps avoid throttling. @@ -594,6 +646,59 @@ The application name will appear in the Title box. ---- +[discrete#es-connectors-sharepoint-online-sites-selected-permissions-self-managed] +====== Granting `Sites.Selected` permissions + +To configure `Sites.Selected` permissions, follow these steps in the Azure Active Directory portal. These permissions enable precise access control to specific SharePoint sites. + +. Sign in to the https://portal.azure.com/[Azure Active Directory portal^]. +. Navigate to **App registrations** and locate the application created for the connector. +. Under **API permissions**, click **Add permission**. +. Select **Microsoft Graph** > **Application permissions**, then add `Sites.Selected`. +. Click **Grant admin consent** to approve the permission. + +[TIP] +==== +Refer to the official https://learn.microsoft.com/en-us/graph/permissions-reference[Microsoft documentation] for managing permissions in Azure AD. +==== + + +To assign access to specific SharePoint sites using `Sites.Selected`: + +. Use Microsoft Graph Explorer or PowerShell to grant access. +. To fetch the site ID, run the following Graph API query: ++ +[source, http] +---- +GET https://graph.microsoft.com/v1.0/sites?select=webUrl,Title,Id&$search="*" +---- ++ +This will return the `id` of the site. + +. Use the `id` to assign read or write access: ++ +[source, http] +---- +POST https://graph.microsoft.com/v1.0/sites/<site-id>/permissions +{ + "roles": ["read"], // or "write" + "grantedToIdentities": [ + { + "application": { + "id": "<application-id>", + "displayName": "<application-name>" + } + } + ] +} +---- + +[NOTE] +==== +When using the `Comma-separated list of sites` configuration field, ensure the sites specified match those granted `Sites.Selected` permission in SharePoint. +If the `Comma-separated list of sites` field is set to `*` or the `Enumerate all sites` toggle is enabled, the connector will attempt to access all sites.
This requires broader permissions, which are not supported with `Sites.Selected`. +==== + .Graph API permissions **** Microsoft recommends using Graph API for all operations with Sharepoint Online. Graph API is well-documented and more efficient at fetching data, which helps avoid throttling. From 54879278b1c15ed5fd7fac2f22b6cc852326a143 Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Thu, 19 Dec 2024 11:40:09 +0000 Subject: [PATCH 33/62] Update data stream deprecations warnings to new format and filter searchable snapshots from response (#118562) * Update data stream deprecations warnings to new format * Add reindex_required flag to index version deprecation notice response * PR Changes * Move all deprecation checks to use a shared predicate which also excludes snapshots * Update docs/changelog/118562.yaml * Tests for excluding snapshots * PR Changes - Remove leftover comment --- docs/changelog/118562.yaml | 6 ++ .../deprecation/DeprecatedIndexPredicate.java | 47 ++++++++++++ .../DataStreamDeprecationChecks.java | 58 +++++--------- .../deprecation/IndexDeprecationChecks.java | 9 ++- .../DataStreamDeprecationChecksTests.java | 76 +++++++++---------- .../IndexDeprecationChecksTests.java | 24 +++++- .../action/ReindexDataStreamAction.java | 18 ----- ...ReindexDataStreamIndexTransportAction.java | 5 +- .../ReindexDataStreamTransportAction.java | 4 +- ...indexDataStreamPersistentTaskExecutor.java | 6 +- 10 files changed, 138 insertions(+), 115 deletions(-) create mode 100644 docs/changelog/118562.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecatedIndexPredicate.java diff --git a/docs/changelog/118562.yaml b/docs/changelog/118562.yaml new file mode 100644 index 0000000000000..a6b00b326151f --- /dev/null +++ b/docs/changelog/118562.yaml @@ -0,0 +1,6 @@ +pr: 118562 +summary: Update data stream deprecations warnings to new format and filter searchable + snapshots from response +area: Data streams +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecatedIndexPredicate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecatedIndexPredicate.java new file mode 100644 index 0000000000000..024d24fdf5151 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecatedIndexPredicate.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.deprecation; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; + +import java.util.function.Predicate; + +public class DeprecatedIndexPredicate { + + public static final IndexVersion MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE = IndexVersions.UPGRADE_TO_LUCENE_10_0_0; + + /* + * This predicate allows through only indices that were created with a previous lucene version, meaning that they need to be reindexed + * in order to be writable in the _next_ lucene version. + * + * It ignores searchable snapshots as they are not writable. 
+ */ + public static Predicate getReindexRequiredPredicate(Metadata metadata) { + return index -> { + IndexMetadata indexMetadata = metadata.index(index); + return reindexRequired(indexMetadata); + }; + } + + public static boolean reindexRequired(IndexMetadata indexMetadata) { + return creationVersionBeforeMinimumWritableVersion(indexMetadata) && isNotSearchableSnapshot(indexMetadata); + } + + private static boolean isNotSearchableSnapshot(IndexMetadata indexMetadata) { + return indexMetadata.isSearchableSnapshot() == false; + } + + private static boolean creationVersionBeforeMinimumWritableVersion(IndexMetadata metadata) { + return metadata.getCreationVersion().before(MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE); + } + +} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java index ee029d01427aa..65f2659fda04a 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java @@ -10,10 +10,12 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.xpack.core.deprecation.DeprecatedIndexPredicate; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; import static java.util.Map.entry; import static java.util.Map.ofEntries; @@ -21,54 +23,28 @@ public class DataStreamDeprecationChecks { static DeprecationIssue oldIndicesCheck(DataStream dataStream, ClusterState clusterState) { List backingIndices = dataStream.getIndices(); - boolean hasOldIndices = backingIndices.stream() - .anyMatch(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)); - if (hasOldIndices) { - long totalIndices = backingIndices.size(); - List oldIndices = backingIndices.stream() - .filter(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)) - .toList(); - long totalOldIndices = oldIndices.size(); - long totalOldSearchableSnapshots = oldIndices.stream() - .filter(index -> clusterState.metadata().index(index).isSearchableSnapshot()) - .count(); - long totalOldPartiallyMountedSearchableSnapshots = oldIndices.stream() - .filter(index -> clusterState.metadata().index(index).isPartialSearchableSnapshot()) - .count(); - long totalOldFullyMountedSearchableSnapshots = totalOldSearchableSnapshots - totalOldPartiallyMountedSearchableSnapshots; + + Set indicesNeedingUpgrade = backingIndices.stream() + .filter(DeprecatedIndexPredicate.getReindexRequiredPredicate(clusterState.metadata())) + .map(Index::getName) + .collect(Collectors.toUnmodifiableSet()); + + if (indicesNeedingUpgrade.isEmpty() == false) { return new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old data stream with a compatibility version < 8.0", + "Old data stream with a compatibility version < 9.0", "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", - "This data stream has backing indices that were created before Elasticsearch 8.0.0", + "This data stream has backing indices that were created before Elasticsearch 9.0.0", false, ofEntries( 
- entry( - "backing_indices", - ofEntries( - entry("count", totalIndices), - entry( - "need_upgrading", - ofEntries( - entry("count", totalOldIndices), - entry( - "searchable_snapshots", - ofEntries( - entry("count", totalOldSearchableSnapshots), - entry("fully_mounted", ofEntries(entry("count", totalOldFullyMountedSearchableSnapshots))), - entry( - "partially_mounted", - ofEntries(entry("count", totalOldPartiallyMountedSearchableSnapshots)) - ) - ) - ) - ) - ) - ) - ) + entry("reindex_required", true), + entry("total_backing_indices", backingIndices.size()), + entry("indices_requiring_upgrade_count", indicesNeedingUpgrade.size()), + entry("indices_requiring_upgrade", indicesNeedingUpgrade) ) ); } + return null; } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index aaf58a44a6565..de06e270a867e 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -14,12 +14,13 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.engine.frozen.FrozenEngine; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.xpack.core.deprecation.DeprecatedIndexPredicate; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -36,14 +37,14 @@ static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata, ClusterStat // TODO: this check needs to be revised. It's trivially true right now. 
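Worth calling out in the `DataStreamDeprecationChecks` hunk above: the deeply nested `backing_indices` counter tree is replaced by a flat details map, and searchable snapshots have already been filtered out by the predicate before anything is counted. A sketch of the details map a resulting `CRITICAL` issue carries, using the same `Map.ofEntries` idiom as the code; the index names and counts here are illustrative, not from the patch:

```java
import java.util.Map;
import java.util.Set;

class DeprecationDetailsSketch {
    // Illustrative values; the keys mirror the ofEntries(...) call above.
    static final Map<String, Object> DETAILS = Map.ofEntries(
        Map.entry("reindex_required", true),
        Map.entry("total_backing_indices", 7),
        Map.entry("indices_requiring_upgrade_count", 2),
        Map.entry("indices_requiring_upgrade", Set.of(".ds-my-stream-000001", ".ds-my-stream-000002"))
    );
}
```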
IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion(); // We intentionally exclude indices that are in data streams because they will be picked up by DataStreamDeprecationChecks - if (currentCompatibilityVersion.before(IndexVersions.V_8_0_0) && isNotDataStreamIndex(indexMetadata, clusterState)) { + if (DeprecatedIndexPredicate.reindexRequired(indexMetadata) && isNotDataStreamIndex(indexMetadata, clusterState)) { return new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 8.0", + "Old index with a compatibility version < 9.0", "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + currentCompatibilityVersion.toReleaseVersion(), false, - null + Collections.singletonMap("reindex_required", true) ); } return null; diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java index d5325fb0ff3a4..b297cc1a5bdf8 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java @@ -17,41 +17,46 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import static java.util.Collections.singletonList; +import static java.util.Map.entry; +import static java.util.Map.ofEntries; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; import static org.hamcrest.Matchers.equalTo; public class DataStreamDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - long oldIndexCount = randomIntBetween(1, 100); - long newIndexCount = randomIntBetween(1, 100); - long oldSearchableSnapshotCount = 0; - long oldFullyManagedSearchableSnapshotCount = 0; - long oldPartiallyManagedSearchableSnapshotCount = 0; + int oldIndexCount = randomIntBetween(1, 100); + int newIndexCount = randomIntBetween(1, 100); + List allIndices = new ArrayList<>(); Map nameToIndexMetadata = new HashMap<>(); + Set expectedIndices = new HashSet<>(); + for (int i = 0; i < oldIndexCount; i++) { - Settings.Builder settingsBuilder = settings(IndexVersion.fromId(7170099)); - if (randomBoolean()) { - settingsBuilder.put("index.store.type", "snapshot"); - if (randomBoolean()) { - oldFullyManagedSearchableSnapshotCount++; - } else { - settingsBuilder.put("index.store.snapshot.partial", true); - oldPartiallyManagedSearchableSnapshotCount++; - } - oldSearchableSnapshotCount++; + Settings.Builder settings = settings(IndexVersion.fromId(7170099)); + + String indexName = "old-data-stream-index-" + i; + if (expectedIndices.isEmpty() == false && randomIntBetween(0, 2) == 0) { + settings.put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE); + } else { + expectedIndices.add(indexName); } - IndexMetadata 
oldIndexMetadata = IndexMetadata.builder("old-data-stream-index-" + i) + + Settings.Builder settingsBuilder = settings; + IndexMetadata oldIndexMetadata = IndexMetadata.builder(indexName) .settings(settingsBuilder) .numberOfShards(1) .numberOfReplicas(0) @@ -59,11 +64,9 @@ public void testOldIndicesCheck() { allIndices.add(oldIndexMetadata.getIndex()); nameToIndexMetadata.put(oldIndexMetadata.getIndex().getName(), oldIndexMetadata); } + for (int i = 0; i < newIndexCount; i++) { Settings.Builder settingsBuilder = settings(IndexVersion.current()); - if (randomBoolean()) { - settingsBuilder.put("index.store.type", "snapshot"); - } IndexMetadata newIndexMetadata = IndexMetadata.builder("new-data-stream-index-" + i) .settings(settingsBuilder) .numberOfShards(1) @@ -72,6 +75,7 @@ public void testOldIndicesCheck() { allIndices.add(newIndexMetadata.getIndex()); nameToIndexMetadata.put(newIndexMetadata.getIndex().getName(), newIndexMetadata); } + DataStream dataStream = new DataStream( randomAlphaOfLength(10), allIndices, @@ -88,37 +92,27 @@ public void testOldIndicesCheck() { randomBoolean(), null ); + Metadata metadata = Metadata.builder().indices(nameToIndexMetadata).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + DeprecationIssue expected = new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old data stream with a compatibility version < 8.0", + "Old data stream with a compatibility version < 9.0", "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", - "This data stream has backing indices that were created before Elasticsearch 8.0.0", + "This data stream has backing indices that were created before Elasticsearch 9.0.0", false, - Map.of( - "backing_indices", - Map.of( - "count", - oldIndexCount + newIndexCount, - "need_upgrading", - Map.of( - "count", - oldIndexCount, - "searchable_snapshots", - Map.of( - "count", - oldSearchableSnapshotCount, - "fully_mounted", - Map.of("count", oldFullyManagedSearchableSnapshotCount), - "partially_mounted", - Map.of("count", oldPartiallyManagedSearchableSnapshotCount) - ) - ) - ) + ofEntries( + entry("reindex_required", true), + entry("total_backing_indices", oldIndexCount + newIndexCount), + entry("indices_requiring_upgrade_count", expectedIndices.size()), + entry("indices_requiring_upgrade", expectedIndices) ) ); + List issues = DeprecationChecks.filterChecks(DATA_STREAM_CHECKS, c -> c.apply(dataStream, clusterState)); + assertThat(issues, equalTo(singletonList(expected))); } + } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 48cbef6831a2b..c6f3208a1cfb0 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -19,8 +19,8 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.engine.frozen.FrozenEngine; +import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; @@ -29,6 +29,8 @@ import java.util.Map; import static 
java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -48,11 +50,11 @@ public void testOldIndicesCheck() { .build(); DeprecationIssue expected = new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 8.0", + "Old index with a compatibility version < 9.0", "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + createdWith.toReleaseVersion(), false, - null + singletonMap("reindex_required", true) ); List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); assertEquals(singletonList(expected), issues); @@ -100,6 +102,20 @@ public void testOldIndicesCheckDataStreamIndex() { assertThat(issues.size(), equalTo(0)); } + public void testOldIndicesCheckSnapshotIgnored() { + IndexVersion createdWith = IndexVersion.fromId(7170099); + Settings.Builder settings = settings(createdWith); + settings.put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE); + IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(indexMetadata, true)) + .build(); + + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); + + assertThat(issues, empty()); + } + public void testTranslogRetentionSettings() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); @@ -229,7 +245,7 @@ public void testCamelCaseDeprecation() throws IOException { + "} }"; IndexMetadata simpleIndex = IndexMetadata.builder(randomAlphaOfLengthBetween(5, 10)) - .settings(settings(IndexVersions.MINIMUM_COMPATIBLE)) + .settings(settings(IndexVersion.current())) .numberOfShards(1) .numberOfReplicas(1) .putMapping(simpleMapping) diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java index b10bea9e54230..9e4cbb1082215 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java @@ -13,14 +13,10 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -43,24 +39,10 @@ 
public class ReindexDataStreamAction extends ActionType getOldIndexVersionPredicate(Metadata metadata) { - return index -> metadata.index(index).getCreationVersion().onOrBefore(MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE); - } - public enum Mode { UPGRADE } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index 66b13a9ce22b0..38b5da6527039 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.deprecation.DeprecatedIndexPredicate; import java.util.Locale; import java.util.Map; @@ -78,13 +79,13 @@ protected void doExecute( IndexMetadata sourceIndex = clusterService.state().getMetadata().index(sourceIndexName); Settings settingsBefore = sourceIndex.getSettings(); - var hasOldVersion = ReindexDataStreamAction.getOldIndexVersionPredicate(clusterService.state().metadata()); + var hasOldVersion = DeprecatedIndexPredicate.getReindexRequiredPredicate(clusterService.state().metadata()); if (hasOldVersion.test(sourceIndex.getIndex()) == false) { logger.warn( "Migrating index [{}] with version [{}] is unnecessary as its version is not before [{}]", sourceIndexName, sourceIndex.getCreationVersion(), - ReindexDataStreamAction.MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE + DeprecatedIndexPredicate.MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE ); } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java index f011c429ce79c..cc648c1984544 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java @@ -26,8 +26,8 @@ import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTaskParams; +import static org.elasticsearch.xpack.core.deprecation.DeprecatedIndexPredicate.getReindexRequiredPredicate; import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.TASK_ID_PREFIX; -import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate; /* * This transport action creates a new persistent task for reindexing the source data stream given in the request. 
On successful creation @@ -68,7 +68,7 @@ protected void doExecute(Task task, ReindexDataStreamRequest request, ActionList return; } int totalIndices = dataStream.getIndices().size(); - int totalIndicesToBeUpgraded = (int) dataStream.getIndices().stream().filter(getOldIndexVersionPredicate(metadata)).count(); + int totalIndicesToBeUpgraded = (int) dataStream.getIndices().stream().filter(getReindexRequiredPredicate(metadata)).count(); ReindexDataStreamTaskParams params = new ReindexDataStreamTaskParams( sourceDataStreamName, transportService.getThreadPool().absoluteTimeInMillis(), diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index dc8e33bc091e6..30f64fdd1d6f6 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -35,7 +35,7 @@ import java.util.Map; import java.util.NoSuchElementException; -import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate; +import static org.elasticsearch.xpack.core.deprecation.DeprecatedIndexPredicate.getReindexRequiredPredicate; public class ReindexDataStreamPersistentTaskExecutor extends PersistentTasksExecutor { private static final TimeValue TASK_KEEP_ALIVE_TIME = TimeValue.timeValueDays(1); @@ -84,7 +84,7 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask List dataStreamInfos = response.getDataStreams(); if (dataStreamInfos.size() == 1) { DataStream dataStream = dataStreamInfos.getFirst().getDataStream(); - if (getOldIndexVersionPredicate(clusterService.state().metadata()).test(dataStream.getWriteIndex())) { + if (getReindexRequiredPredicate(clusterService.state().metadata()).test(dataStream.getWriteIndex())) { reindexClient.execute( RolloverAction.INSTANCE, new RolloverRequest(sourceDataStream, null), @@ -109,7 +109,7 @@ private void reindexIndices( String sourceDataStream ) { List indices = dataStream.getIndices(); - List indicesToBeReindexed = indices.stream().filter(getOldIndexVersionPredicate(clusterService.state().metadata())).toList(); + List indicesToBeReindexed = indices.stream().filter(getReindexRequiredPredicate(clusterService.state().metadata())).toList(); reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); // The CountDownActionListener is 1 more than the number of indices so that the count is not 0 if we have no indices CountDownActionListener listener = new CountDownActionListener(indicesToBeReindexed.size() + 1, ActionListener.wrap(response1 -> { From d80cbddc53db68250917b25ac9c9dcce8efddf5c Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 19 Dec 2024 13:51:15 +0200 Subject: [PATCH 34/62] Add LogsDB option to route on sort fields (#116687) * Add LogsDB option to route on sort fields * fix encoding * Update docs/changelog/116687.yaml * tests * tests * tests * fix mode * tests * tests * tests * add test * fix test * sync * updates from review * test fixes * test fixes * test fixes * Move logic to SyntheticSourceIndexSettingsProvider * fix test * sync * merge, no fallback * comments * fix test * address comments * address comments * address comments * Update 
x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java Co-authored-by: Martijn van Groningen * [CI] Auto commit changes from spotless * update tests * [CI] Auto commit changes from spotless * update tests * fix rest compat tests --------- Co-authored-by: Martijn van Groningen Co-authored-by: elasticsearchmachine --- docs/changelog/116687.yaml | 5 + rest-api-spec/build.gradle | 9 + .../rest-api-spec/test/logsdb/10_settings.yml | 42 ++++ .../rest-api-spec/test/tsdb/20_mapping.yml | 2 +- .../test/tsdb/25_id_generation.yml | 2 +- .../test/tsdb/80_index_resize.yml | 2 +- .../test/tsdb/90_unsupported_operations.yml | 12 +- .../action/index/IndexRequest.java | 13 +- .../cluster/routing/IndexRouting.java | 28 ++- .../cluster/routing/RoutingFeatures.java | 5 + .../TimeBasedKOrderedUUIDGenerator.java | 17 +- .../java/org/elasticsearch/common/UUIDs.java | 15 +- .../common/settings/IndexScopedSettings.java | 1 + .../org/elasticsearch/index/IndexMode.java | 15 +- .../elasticsearch/index/IndexSettings.java | 16 ++ .../index/mapper/DocumentMapper.java | 13 +- .../cluster/routing/IndexRoutingTests.java | 46 +++- .../common/TimeBasedUUIDGeneratorTests.java | 8 + .../index/TimeSeriesModeTests.java | 9 +- x-pack/plugin/logsdb/build.gradle | 3 + .../xpack/logsdb/LogsdbWithBasicRestIT.java | 57 +++++ .../xpack/logsdb/LogsIndexingIT.java | 213 ++++++++++++++++++ .../xpack/logsdb/LogsdbRestIT.java | 56 +++++ .../LogsdbIndexModeSettingsProvider.java | 53 ++++- .../LogsdbIndexModeSettingsProviderTests.java | 98 +++++++- 25 files changed, 682 insertions(+), 58 deletions(-) create mode 100644 docs/changelog/116687.yaml create mode 100644 x-pack/plugin/logsdb/src/internalClusterTest/java/org/elasticsearch/xpack/logsdb/LogsIndexingIT.java diff --git a/docs/changelog/116687.yaml b/docs/changelog/116687.yaml new file mode 100644 index 0000000000000..f8c7f86eff04a --- /dev/null +++ b/docs/changelog/116687.yaml @@ -0,0 +1,5 @@ +pr: 116687 +summary: Add LogsDB option to route on sort fields +area: Logs +type: enhancement +issues: [] diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index f23b5460f7d53..fab47c5b05006 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,6 +59,15 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") task.skipTest("cat.shards/10_basic/Help", "sync_id is removed in 9.0") + task.skipTest("tsdb/20_mapping/exact match object type", "skip until pr/116687 gets backported") + task.skipTest("tsdb/25_id_generation/delete over _bulk", "skip until pr/116687 gets backported") + task.skipTest("tsdb/80_index_resize/split", "skip until pr/116687 gets backported") + task.skipTest("tsdb/90_unsupported_operations/noop update", "skip until pr/116687 gets backported") + task.skipTest("tsdb/90_unsupported_operations/regular update", "skip until pr/116687 gets backported") + task.skipTest("tsdb/90_unsupported_operations/search with routing", "skip until pr/116687 gets backported") + task.skipTest("tsdb/90_unsupported_operations/index with routing over _bulk", "skip until pr/116687 gets backported") + task.skipTest("tsdb/90_unsupported_operations/update over _bulk", "skip until pr/116687 gets backported") + 
task.skipTest("tsdb/90_unsupported_operations/index with routing", "skip until pr/116687 gets backported") task.skipTest("search/500_date_range/from, to, include_lower, include_upper deprecated", "deprecated parameters are removed in 9.0") task.skipTest("tsdb/20_mapping/stored source is supported", "no longer serialize source_mode") task.skipTest("tsdb/20_mapping/Synthetic source", "no longer serialize source_mode") diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 463df7d2ab1bb..5f4314f724c23 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -513,6 +513,48 @@ routing path not allowed in logs mode: - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "[index.routing_path] requires [index.mode=time_series]" } +--- +routing path allowed in logs mode with routing on sort fields: + - requires: + cluster_features: [ "routing.logsb_route_on_sort_fields" ] + reason: introduction of route on index sorting fields + + - do: + indices.create: + index: test + body: + settings: + index: + mode: logsdb + number_of_replicas: 0 + number_of_shards: 2 + routing_path: [ host.name, agent_id ] + logsdb: + route_on_sort_fields: true + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + agent_id: + type: keyword + process_id: + type: integer + http_method: + type: keyword + message: + type: text + + - do: + indices.get_settings: + index: test + + - is_true: test + - match: { test.settings.index.mode: logsdb } + - match: { test.settings.index.logsdb.route_on_sort_fields: "true" } + - match: { test.settings.index.routing_path: [ host.name, agent_id ] } + --- start time not allowed in logs mode: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index 9fe3f5e0b7272..f25601fc2e228 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -127,7 +127,7 @@ exact match object type: reason: routing_path error message updated in 8.14.0 - do: - catch: '/All fields that match routing_path must be configured with \[time_series_dimension: true\] or flattened fields with a list of dimensions in \[time_series_dimensions\] and without the \[script\] parameter. 
\[dim\] was \[object\]./' + catch: '/All fields that match routing_path must be .*flattened fields.* \[dim\] was \[object\]./' indices.create: index: tsdb_index body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml index 4faa0424adb43..beba6f2752a11 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml @@ -427,7 +427,7 @@ delete over _bulk: - match: {items.0.delete.result: deleted} - match: {items.1.delete.result: deleted} - match: {items.2.delete.status: 404} - - match: {items.2.delete.error.reason: "invalid id [not found ++ not found] for index [id_generation_test] in time series mode"} + - match: {items.2.delete.error.reason: '/invalid\ id\ \[not\ found\ \+\+\ not\ found\]\ for\ index\ \[id_generation_test\]\ in\ time.series\ mode/'} --- routing_path matches deep object: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml index c32d3c50b0784..c71555dd073d6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml @@ -95,7 +95,7 @@ split: reason: tsdb indexing changed in 8.2.0 - do: - catch: /index-split is not supported because the destination index \[test\] is in time series mode/ + catch: /index-split is not supported because the destination index \[test\] is in time.series mode/ indices.split: index: test target: test_split diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index 54b2bf59c8ddc..142d1281ad12b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -75,7 +75,7 @@ index with routing: reason: tsdb indexing changed in 8.2.0 - do: - catch: /specifying routing is not supported because the destination index \[test\] is in time series mode/ + catch: /specifying routing is not supported because the destination index \[test\] is in time.series mode/ index: index: test routing: foo @@ -104,7 +104,7 @@ index with routing over _bulk: body: - '{"index": {"routing": "foo"}}' - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - - match: {items.0.index.error.reason: "specifying routing is not supported because the destination index [test] is in time series mode"} + - match: {items.0.index.error.reason: '/specifying\ routing\ is\ not\ supported\ because\ the\ destination\ index\ \[test\]\ is\ in\ time.series\ mode/'} --- noop update: @@ -120,7 +120,7 @@ noop update: - length: {hits.hits: 1} - do: - catch: /update is not supported because the destination index \[test\] is in time series mode/ + catch: /update is not supported because the destination index \[test\] is in time.series mode/ update: index: test id: "1" @@ -136,7 +136,7 @@ regular update: # We fail even though the document isn't 
found. - do: - catch: /update is not supported because the destination index \[test\] is in time series mode/ + catch: /update is not supported because the destination index \[test\] is in time.series mode/ update: index: test id: "1" @@ -165,7 +165,7 @@ update over _bulk: body: - '{"update": {"_id": 1}}' - '{"doc":{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}}' - - match: {items.0.update.error.reason: "update is not supported because the destination index [test] is in time series mode"} + - match: {items.0.update.error.reason: '/update\ is\ not\ supported\ because\ the\ destination\ index\ \[test\]\ is\ in\ time.series\ mode/'} --- search with routing: @@ -175,7 +175,7 @@ search with routing: # We fail even though the document isn't found. - do: - catch: /searching with a specified routing is not supported because the destination index \[test\] is in time series mode/ + catch: /searching with a specified routing is not supported because the destination index \[test\] is in time.series mode/ search: index: test routing: rrrr diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index d5b8b657bd14e..9f4231c25dfca 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -51,6 +51,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.OptionalInt; import java.util.function.Supplier; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -78,7 +79,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X; private static final Supplier ID_GENERATOR = UUIDs::base64UUID; - private static final Supplier K_SORTED_TIME_BASED_ID_GENERATOR = UUIDs::base64TimeBasedKOrderedUUID; /** * Max length of the source document to include into string() @@ -705,9 +705,18 @@ public void autoGenerateId() { } public void autoGenerateTimeBasedId() { + autoGenerateTimeBasedId(OptionalInt.empty()); + } + + /** + * Set the {@code #id()} to an automatically generated one, optimized for storage (compression) efficiency. + * If a routing hash is passed, it is included in the generated id starting at 9 bytes before the end. + * @param hash optional routing hash value, used to route requests by id to the right shard. 
+ */ + public void autoGenerateTimeBasedId(OptionalInt hash) { assertBeforeGeneratingId(); autoGenerateTimestamp(); - id(K_SORTED_TIME_BASED_ID_GENERATOR.get()); + id(UUIDs.base64TimeBasedKOrderedUUIDWithHash(hash)); } private void autoGenerateTimestamp() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index f42252df4ab7b..ecd1b735cc396 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -40,6 +40,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.OptionalInt; import java.util.Set; import java.util.function.IntConsumer; import java.util.function.IntSupplier; @@ -55,6 +56,7 @@ public abstract class IndexRouting { static final NodeFeature BOOLEAN_ROUTING_PATH = new NodeFeature("routing.boolean_routing_path"); static final NodeFeature MULTI_VALUE_ROUTING_PATH = new NodeFeature("routing.multi_value_routing_path"); + static final NodeFeature LOGSB_ROUTE_ON_SORT_FIELDS = new NodeFeature("routing.logsb_route_on_sort_fields"); /** * Build the routing from {@link IndexMetadata}. @@ -165,7 +167,8 @@ private abstract static class IdAndRoutingOnly extends IndexRouting { @Override public void preProcess(IndexRequest indexRequest) { - // generate id if not already provided + // Generate id if not already provided. + // This is needed for routing, so it has to happen in pre-processing. final String id = indexRequest.id(); if (id == null) { if (shouldUseTimeBasedId(indexMode, creationVersion)) { @@ -272,7 +275,9 @@ public void collectSearchShards(String routing, IntConsumer consumer) { public static class ExtractFromSource extends IndexRouting { private final Predicate isRoutingPath; private final XContentParserConfiguration parserConfig; + private final IndexMode indexMode; private final boolean trackTimeSeriesRoutingHash; + private final boolean addIdWithRoutingHash; private int hash = Integer.MAX_VALUE; ExtractFromSource(IndexMetadata metadata) { @@ -280,7 +285,10 @@ public static class ExtractFromSource extends IndexRouting { if (metadata.isRoutingPartitionedIndex()) { throw new IllegalArgumentException("routing_partition_size is incompatible with routing_path"); } - trackTimeSeriesRoutingHash = metadata.getCreationVersion().onOrAfter(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID); + indexMode = metadata.getIndexMode(); + trackTimeSeriesRoutingHash = indexMode == IndexMode.TIME_SERIES + && metadata.getCreationVersion().onOrAfter(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID); + addIdWithRoutingHash = indexMode == IndexMode.LOGSDB; List routingPaths = metadata.getRoutingPaths(); isRoutingPath = Regex.simpleMatcher(routingPaths.toArray(String[]::new)); this.parserConfig = XContentParserConfiguration.EMPTY.withFiltering(null, Set.copyOf(routingPaths), null, true); @@ -292,8 +300,13 @@ public boolean matchesField(String fieldName) { @Override public void postProcess(IndexRequest indexRequest) { + // Update the request with the routing hash, if needed. + // This needs to happen in post-processing, after the routing hash is calculated. 
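To make the id/shard contract concrete: a small sketch (not part of the patch) of the round trip that postProcess below and idToHash rely on. Per this patch, the generator appends the four little-endian hash bytes so that they start nine bytes before the end of the decoded id, growing it from 15 to 19 bytes; the hash value here is an assumed stand-in for the one computed from the routing_path fields. The decoding mirrors the unit test added later in this patch.

import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.util.ByteUtils;

import java.util.Base64;
import java.util.OptionalInt;

class RoutingHashIdSketch {
    static void roundTrip() {
        int routingHash = 0x12345678; // assumed; normally derived from the routing_path field values
        String id = UUIDs.base64TimeBasedKOrderedUUIDWithHash(OptionalInt.of(routingHash));
        byte[] idBytes = Base64.getUrlDecoder().decode(id);
        // The hash occupies idBytes[length - 9 .. length - 6], little-endian,
        // so the owning shard can be recovered from the id alone.
        int recovered = ByteUtils.readIntLE(idBytes, idBytes.length - 9);
        assert recovered == routingHash;
    }
}

Embedding the hash in the auto-generated id keeps get and delete by id working even though explicit routing parameters are rejected in this index mode.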
if (trackTimeSeriesRoutingHash) { indexRequest.routing(TimeSeriesRoutingHashFieldMapper.encode(hash)); + } else if (addIdWithRoutingHash) { + assert hash != Integer.MAX_VALUE; + indexRequest.autoGenerateTimeBasedId(OptionalInt.of(hash)); } } @@ -461,12 +474,15 @@ private int idToHash(String id) { try { idBytes = Base64.getUrlDecoder().decode(id); } catch (IllegalArgumentException e) { - throw new ResourceNotFoundException("invalid id [{}] for index [{}] in time series mode", id, indexName); + throw new ResourceNotFoundException("invalid id [{}] for index [{}] in " + indexMode.getName() + " mode", id, indexName); } if (idBytes.length < 4) { - throw new ResourceNotFoundException("invalid id [{}] for index [{}] in time series mode", id, indexName); + throw new ResourceNotFoundException("invalid id [{}] for index [{}] in " + indexMode.getName() + " mode", id, indexName); } - return hashToShardId(ByteUtils.readIntLE(idBytes, 0)); + // For TSDB, the hash is stored as the id prefix. + // For LogsDB with routing on sort fields, the routing hash is stored in the range[id.length - 9, id.length - 5] of the id, + // see IndexRequest#autoGenerateTimeBasedId. + return hashToShardId(ByteUtils.readIntLE(idBytes, addIdWithRoutingHash ? idBytes.length - 9 : 0)); } @Override @@ -480,7 +496,7 @@ public void collectSearchShards(String routing, IntConsumer consumer) { } private String error(String operation) { - return operation + " is not supported because the destination index [" + indexName + "] is in time series mode"; + return operation + " is not supported because the destination index [" + indexName + "] is in " + indexMode.getName() + " mode"; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java index f8028ce7f9d68..1545fdf90d111 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java @@ -20,4 +20,9 @@ public class RoutingFeatures implements FeatureSpecification { public Set getFeatures() { return Set.of(IndexRouting.BOOLEAN_ROUTING_PATH, IndexRouting.MULTI_VALUE_ROUTING_PATH); } + + @Override + public Set getTestFeatures() { + return Set.of(IndexRouting.LOGSB_ROUTE_ON_SORT_FIELDS); + } } diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java index 7ea58ee326a79..58ad3f5b47415 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java @@ -9,7 +9,10 @@ package org.elasticsearch.common; +import org.elasticsearch.common.util.ByteUtils; + import java.nio.ByteBuffer; +import java.util.OptionalInt; import java.util.function.Supplier; /** @@ -28,6 +31,7 @@ * The result is a compact base64-encoded string, optimized for efficient compression of the _id field in an inverted index. 
*/ public class TimeBasedKOrderedUUIDGenerator extends TimeBasedUUIDGenerator { + static final int SIZE_IN_BYTES = 15; public TimeBasedKOrderedUUIDGenerator( final Supplier timestampSupplier, @@ -39,6 +43,10 @@ public TimeBasedKOrderedUUIDGenerator( @Override public String getBase64UUID() { + return getBase64UUID(OptionalInt.empty()); + } + + public String getBase64UUID(OptionalInt hash) { final int sequenceId = sequenceNumber.incrementAndGet() & 0x00FF_FFFF; // Calculate timestamp to ensure ordering and avoid backward movement in case of time shifts. @@ -50,7 +58,7 @@ public String getBase64UUID() { sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max ); - final byte[] uuidBytes = new byte[15]; + final byte[] uuidBytes = new byte[SIZE_IN_BYTES + (hash.isPresent() ? 4 : 0)]; final ByteBuffer buffer = ByteBuffer.wrap(uuidBytes); buffer.put((byte) (timestamp >>> 40)); // changes every 35 years @@ -64,6 +72,13 @@ public String getBase64UUID() { assert macAddress.length == 6; buffer.put(macAddress, 0, macAddress.length); + // Copy the hash value if provided + if (hash.isPresent()) { + byte[] hashBytes = new byte[4]; + ByteUtils.writeIntLE(hash.getAsInt(), hashBytes, 0); + buffer.put(hashBytes, 0, hashBytes.length); + } + buffer.put((byte) (sequenceId >>> 16)); // From hereinafter everything is almost like random and does not compress well diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index ebcb375bc01bc..6b19fcddb87ca 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.SecureString; +import java.util.OptionalInt; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; @@ -23,14 +24,14 @@ public class UUIDs { public static final Supplier DEFAULT_TIMESTAMP_SUPPLIER = System::currentTimeMillis; public static final Supplier DEFAULT_SEQUENCE_ID_SUPPLIER = sequenceNumber::incrementAndGet; public static final Supplier DEFAULT_MAC_ADDRESS_SUPPLIER = MacAddressProvider::getSecureMungedAddress; - private static final UUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator( + private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); + private static final TimeBasedKOrderedUUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator( DEFAULT_TIMESTAMP_SUPPLIER, DEFAULT_SEQUENCE_ID_SUPPLIER, DEFAULT_MAC_ADDRESS_SUPPLIER ); - private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator( + private static final TimeBasedUUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator( DEFAULT_TIMESTAMP_SUPPLIER, DEFAULT_SEQUENCE_ID_SUPPLIER, DEFAULT_MAC_ADDRESS_SUPPLIER @@ -51,12 +52,8 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } - public static String base64TimeBasedKOrderedUUID() { - return TIME_BASED_K_ORDERED_GENERATOR.getBase64UUID(); - } - - public static String base64TimeBasedUUID() { - return TIME_UUID_GENERATOR.getBase64UUID(); + public static String base64TimeBasedKOrderedUUIDWithHash(OptionalInt hash) { + return TIME_BASED_K_ORDERED_GENERATOR.getBase64UUID(hash); } /** diff --git 
a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index fc8f128e92f32..09561661ccd52 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -182,6 +182,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING, IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE_SETTING, IndexSettings.TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING, + IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS, IndexSettings.PREFER_ILM_SETTING, DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION_SETTING, FieldMapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index f5f923f3657f8..a138407991b68 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -63,7 +63,8 @@ public enum IndexMode { STANDARD("standard") { @Override void validateWithOtherSettings(Map, Object> settings) { - IndexMode.validateTimeSeriesSettings(settings); + validateRoutingPathSettings(settings); + validateTimeSeriesSettings(settings); } @Override @@ -235,7 +236,11 @@ public SourceFieldMapper.Mode defaultSourceMode() { LOGSDB("logsdb") { @Override void validateWithOtherSettings(Map, Object> settings) { - IndexMode.validateTimeSeriesSettings(settings); + validateTimeSeriesSettings(settings); + var setting = settings.get(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS); + if (setting.equals(Boolean.FALSE)) { + validateRoutingPathSettings(settings); + } } @Override @@ -389,8 +394,11 @@ public SourceFieldMapper.Mode defaultSourceMode() { private static final String HOST_NAME = "host.name"; - private static void validateTimeSeriesSettings(Map, Object> settings) { + private static void validateRoutingPathSettings(Map, Object> settings) { settingRequiresTimeSeries(settings, IndexMetadata.INDEX_ROUTING_PATH); + } + + private static void validateTimeSeriesSettings(Map, Object> settings) { settingRequiresTimeSeries(settings, IndexSettings.TIME_SERIES_START_TIME); settingRequiresTimeSeries(settings, IndexSettings.TIME_SERIES_END_TIME); } @@ -450,6 +458,7 @@ private static CompressedXContent createDefaultMapping(boolean includeHostName) IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING, IndexMetadata.INDEX_ROUTING_PATH, + IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS, IndexSettings.TIME_SERIES_START_TIME, IndexSettings.TIME_SERIES_END_TIME ), diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 9273888b9ec91..3d6ec7e2e9242 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -704,6 +704,13 @@ public boolean isES87TSDBCodecEnabled() { return es87TSDBCodecEnabled; } + public static final Setting LOGSDB_ROUTE_ON_SORT_FIELDS = Setting.boolSetting( + "index.logsdb.route_on_sort_fields", + false, + Property.IndexScope, + Property.Final + ); + /** * The {@link IndexMode "mode"} of the index. 
*/ @@ -825,6 +832,7 @@ private static String getIgnoreAboveDefaultValue(final Settings settings) { private final boolean softDeleteEnabled; private volatile long softDeleteRetentionOperations; private final boolean es87TSDBCodecEnabled; + private final boolean logsdbRouteOnSortFields; private volatile long retentionLeaseMillis; @@ -935,6 +943,13 @@ public boolean isDefaultAllowUnmappedFields() { return defaultAllowUnmappedFields; } + /** + * Returns true if routing on sort fields is enabled for LogsDB. The default is false + */ + public boolean logsdbRouteOnSortFields() { + return logsdbRouteOnSortFields; + } + /** * Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata * while index level settings will overwrite node settings. @@ -1027,6 +1042,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti indexRouting = IndexRouting.fromIndexMetadata(indexMetadata); sourceKeepMode = scopedSettings.get(Mapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING); es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING); + logsdbRouteOnSortFields = scopedSettings.get(LOGSDB_ROUTE_ON_SORT_FIELDS); skipIgnoredSourceWrite = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING); skipIgnoredSourceRead = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING); indexMappingSourceMode = scopedSettings.get(INDEX_MAPPER_SOURCE_MODE_SETTING); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index a99fa3f93679b..03e6c343c7ab9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexVersion; @@ -177,18 +178,16 @@ public void validate(IndexSettings settings, boolean checkLimits) { } List routingPaths = settings.getIndexMetadata().getRoutingPaths(); for (String path : routingPaths) { - for (String match : mappingLookup.getMatchingFieldNames(path)) { - mappingLookup.getFieldType(match).validateMatchedRoutingPath(path); + if (settings.getMode() == IndexMode.TIME_SERIES) { + for (String match : mappingLookup.getMatchingFieldNames(path)) { + mappingLookup.getFieldType(match).validateMatchedRoutingPath(path); + } } for (String objectName : mappingLookup.objectMappers().keySet()) { // object type is not allowed in the routing paths if (path.equals(objectName)) { throw new IllegalArgumentException( - "All fields that match routing_path must be configured with [time_series_dimension: true] " - + "or flattened fields with a list of dimensions in [time_series_dimensions] " - + "and without the [script] parameter. [" - + objectName - + "] was [object]." + "All fields that match routing_path must be flattened fields. [" + objectName + "] was [object]." 
); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java index 943fb6fd63b0b..1db6192eee80e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java @@ -16,6 +16,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.shard.ShardId; @@ -511,7 +513,7 @@ public void testRoutingPathUpdate() throws IOException { IllegalArgumentException.class, () -> routing.updateShard(randomAlphaOfLength(5), randomBoolean() ? null : randomAlphaOfLength(5)) ); - assertThat(e.getMessage(), equalTo("update is not supported because the destination index [test] is in time series mode")); + assertThat(e.getMessage(), equalTo("update is not supported because the destination index [test] is in time_series mode")); } public void testRoutingIndexWithRouting() throws IOException { @@ -525,7 +527,7 @@ public void testRoutingIndexWithRouting() throws IOException { ); assertThat( e.getMessage(), - equalTo("specifying routing is not supported because the destination index [test] is in time series mode") + equalTo("specifying routing is not supported because the destination index [test] is in time_series mode") ); } @@ -534,7 +536,7 @@ public void testRoutingPathCollectSearchWithRouting() throws IOException { Exception e = expectThrows(IllegalArgumentException.class, () -> routing.collectSearchShards(randomAlphaOfLength(5), null)); assertThat( e.getMessage(), - equalTo("searching with a specified routing is not supported because the destination index [test] is in time series mode") + equalTo("searching with a specified routing is not supported because the destination index [test] is in time_series mode") ); } @@ -647,14 +649,42 @@ public void testRoutingPathReadWithInvalidString() throws IOException { int shards = between(2, 1000); IndexRouting indexRouting = indexRoutingForPath(shards, "foo"); Exception e = expectThrows(ResourceNotFoundException.class, () -> shardIdForReadFromSourceExtracting(indexRouting, "!@#")); - assertThat(e.getMessage(), equalTo("invalid id [!@#] for index [test] in time series mode")); + assertThat(e.getMessage(), equalTo("invalid id [!@#] for index [test] in time_series mode")); } public void testRoutingPathReadWithShortString() throws IOException { int shards = between(2, 1000); IndexRouting indexRouting = indexRoutingForPath(shards, "foo"); Exception e = expectThrows(ResourceNotFoundException.class, () -> shardIdForReadFromSourceExtracting(indexRouting, "")); - assertThat(e.getMessage(), equalTo("invalid id [] for index [test] in time series mode")); + assertThat(e.getMessage(), equalTo("invalid id [] for index [test] in time_series mode")); + } + + public void testRoutingPathLogsdb() throws IOException { + int shards = between(2, 1000); + IndexRouting routing = IndexRouting.fromIndexMetadata( + IndexMetadata.builder("test") + .settings( + settings(IndexVersion.current()).put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB) + .build() + ) + .numberOfShards(shards) + .numberOfReplicas(1) + 
.build() + ); + + IndexRequest req = new IndexRequest(); + routing.preProcess(req); + assertNull(req.id()); + + // Verify that routing uses the field name and value in the routing path. + int expectedShard = Math.floorMod(hash(List.of("foo", "A")), shards); + BytesReference sourceBytes = source(Map.of("foo", "A", "bar", "B")); + assertEquals(expectedShard, routing.indexShard(null, null, XContentType.JSON, sourceBytes)); + + // Verify that the request id gets updated to contain the routing hash. + routing.postProcess(req); + assertEquals(expectedShard, routing.getShard(req.id(), null)); } /** @@ -673,7 +703,11 @@ private IndexRouting indexRoutingForPath(int shards, String path) { private IndexRouting indexRoutingForPath(IndexVersion createdVersion, int shards, String path) { return IndexRouting.fromIndexMetadata( IndexMetadata.builder("test") - .settings(settings(createdVersion).put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), path)) + .settings( + settings(createdVersion).put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), path) + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .build() + ) .numberOfShards(shards) .numberOfReplicas(1) .build() diff --git a/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java index 964683a1972ba..fa51f89dea9c3 100644 --- a/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java @@ -9,12 +9,14 @@ package org.elasticsearch.common; +import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.test.ESTestCase; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Base64; import java.util.HashSet; +import java.util.OptionalInt; import java.util.Set; import java.util.function.Supplier; import java.util.stream.IntStream; @@ -106,6 +108,12 @@ public void testUUIDEncodingDecodingWithRandomValues() { ); } + public void testUUIDEncodingDecodingWithHash() { + int hash = randomInt(); + byte[] decoded = Base64.getUrlDecoder().decode(UUIDs.base64TimeBasedKOrderedUUIDWithHash(OptionalInt.of(hash))); + assertEquals(hash, ByteUtils.readIntLE(decoded, decoded.length - 9)); + } + private void testUUIDEncodingDecodingHelper(final long timestamp, final int sequenceId, final byte[] macAddress) { final TestTimeBasedKOrderedUUIDDecoder decoder = new TestTimeBasedKOrderedUUIDDecoder( createKOrderedGenerator(() -> timestamp, () -> sequenceId, () -> macAddress).getBase64UUID() diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java index d0fa037079255..93dedbb355fbd 100644 --- a/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java @@ -173,14 +173,7 @@ public void testRoutingPathEqualsObjectNameError() { b.startObject("dim").field("type", "keyword").field("time_series_dimension", true).endObject(); b.endObject().endObject(); }))); - assertThat( - e.getMessage(), - equalTo( - "All fields that match routing_path must be configured with [time_series_dimension: true] " - + "or flattened fields with a list of dimensions in [time_series_dimensions] and " - + "without the [script] parameter. [dim.o] was [object]." - ) - ); + assertThat(e.getMessage(), equalTo("All fields that match routing_path must be flattened fields. 
[dim.o] was [object].")); } public void testRoutingPathMatchesNonDimensionKeyword() { diff --git a/x-pack/plugin/logsdb/build.gradle b/x-pack/plugin/logsdb/build.gradle index 1aef69e0e3fac..f66dc23ff41bb 100644 --- a/x-pack/plugin/logsdb/build.gradle +++ b/x-pack/plugin/logsdb/build.gradle @@ -8,6 +8,7 @@ evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.internal-es-plugin' +apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' @@ -29,7 +30,9 @@ restResources { dependencies { compileOnly project(path: xpackModule('core')) + testImplementation project(':modules:data-streams') testImplementation(testArtifact(project(xpackModule('core')))) + internalClusterTestImplementation(testArtifact(project(xpackModule('core')))) } tasks.named("javaRestTest").configure { diff --git a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java index 381c83ceee289..4a9d13bc642d7 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java +++ b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java @@ -9,7 +9,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -203,4 +205,59 @@ public void testLogsdbOverrideDefaultModeForLogsIndex() throws IOException { assertEquals("logsdb", settings.get("index.mode")); assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } + + public void testLogsdbRouteOnSortFields() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["my-log-*"], + "data_stream": { + }, + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "sort.field": [ "host.name", "message", "@timestamp" ], + "logsdb.route_on_sort_fields": true + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "message": { + "type": "keyword" + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/my-log-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("my-log-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + assertEquals("true", 
settings.get(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey())); + assertEquals(List.of("host.name", "message"), settings.get(IndexMetadata.INDEX_ROUTING_PATH.getKey())); + } } diff --git a/x-pack/plugin/logsdb/src/internalClusterTest/java/org/elasticsearch/xpack/logsdb/LogsIndexingIT.java b/x-pack/plugin/logsdb/src/internalClusterTest/java/org/elasticsearch/xpack/logsdb/LogsIndexingIT.java new file mode 100644 index 0000000000000..1ab49f91376f1 --- /dev/null +++ b/x-pack/plugin/logsdb/src/internalClusterTest/java/org/elasticsearch/xpack/logsdb/LogsIndexingIT.java @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.license.LicenseSettings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.time.Instant; +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class LogsIndexingIT extends ESSingleNodeTestCase { + + public static final String MAPPING_TEMPLATE = """ + { + "_doc":{ + "properties": { + "@timestamp" : { + "type": "date" + }, + "message": { + "type": "keyword" + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword" + } + } + } + } + } + } + } + }"""; + + private static final String DOC = """ + { + "@timestamp": "$time", + "message": "$pod", + "k8s": { + "pod": { + "name": "dog", + "uid":"$uuid", + "ip": "10.10.55.3", + "network": { + "tx": 1434595272, + "rx": 530605511 + } + } + } + } + """; + + @Override + protected Collection> getPlugins() { + return List.of(InternalSettingsPlugin.class, XPackPlugin.class, LogsDBPlugin.class, DataStreamsPlugin.class); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put("cluster.logsdb.enabled", "true") + .put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") + .build(); + } + + public void testStandard() 
throws Exception { + String dataStreamName = "k8s"; + var putTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("id"); + putTemplateRequest.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template( + new Template( + indexSettings(4, 0).put("index.mode", "logsdb").put("index.sort.field", "message,k8s.pod.uid,@timestamp").build(), + new CompressedXContent(MAPPING_TEMPLATE), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet(); + checkIndexSearchAndRetrieval(dataStreamName, false); + } + + public void testRouteOnSortFields() throws Exception { + String dataStreamName = "k8s"; + var putTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("id"); + putTemplateRequest.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template( + new Template( + indexSettings(4, 0).put("index.mode", "logsdb") + .put("index.sort.field", "message,k8s.pod.uid,@timestamp") + .put("index.logsdb.route_on_sort_fields", true) + .build(), + new CompressedXContent(MAPPING_TEMPLATE), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet(); + checkIndexSearchAndRetrieval(dataStreamName, true); + } + + private void checkIndexSearchAndRetrieval(String dataStreamName, boolean routeOnSortFields) throws Exception { + String[] uuids = { + UUID.randomUUID().toString(), + UUID.randomUUID().toString(), + UUID.randomUUID().toString(), + UUID.randomUUID().toString() }; + int numBulkRequests = randomIntBetween(128, 1024); + int numDocsPerBulk = randomIntBetween(16, 256); + String indexName = null; + { + Instant time = Instant.now(); + for (int i = 0; i < numBulkRequests; i++) { + BulkRequest bulkRequest = new BulkRequest(dataStreamName); + for (int j = 0; j < numDocsPerBulk; j++) { + var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE); + indexRequest.source( + DOC.replace("$time", formatInstant(time)) + .replace("$uuid", uuids[j % uuids.length]) + .replace("$pod", "pod-" + randomIntBetween(0, 10)), + XContentType.JSON + ); + bulkRequest.add(indexRequest); + time = time.plusMillis(1); + } + var bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.hasFailures(), is(false)); + indexName = bulkResponse.getItems()[0].getIndex(); + } + client().admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet(); + } + + // Verify settings.
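+ // With route_on_sort_fields enabled, the provider is expected to have injected index.routing_path from the index sort fields (minus @timestamp).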
+ final GetSettingsResponse getSettingsResponse = indicesAdmin().getSettings( + new GetSettingsRequest().indices(indexName).includeDefaults(false) + ).actionGet(); + final Settings settings = getSettingsResponse.getIndexToSettings().get(indexName); + assertEquals("message,k8s.pod.uid,@timestamp", settings.get("index.sort.field")); + if (routeOnSortFields) { + assertEquals("[message, k8s.pod.uid]", settings.get("index.routing_path")); + assertEquals("true", settings.get("index.logsdb.route_on_sort_fields")); + } else { + assertNull(settings.get("index.routing_path")); + assertNull(settings.get("index.logsdb.route_on_sort_fields")); + } + + // Check the search api can synthesize _id + final String idxName = indexName; + var searchRequest = new SearchRequest(dataStreamName); + searchRequest.source().trackTotalHits(true); + assertResponse(client().search(searchRequest), searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numBulkRequests * numDocsPerBulk)); + + for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { + String id = searchResponse.getHits().getHits()[i].getId(); + assertThat(id, notNullValue()); + + // Check that the _id is gettable: + var getResponse = client().get(new GetRequest(idxName).id(id)).actionGet(); + assertThat(getResponse.isExists(), is(true)); + assertThat(getResponse.getId(), equalTo(id)); + } + }); + } + + static String formatInstant(Instant instant) { + return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); + } + +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java index ef9480681f559..bd8093c0a01c1 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java @@ -9,9 +9,11 @@ import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; @@ -225,4 +227,58 @@ static String formatInstant(Instant instant) { return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); } + public void testLogsdbRouteOnSortFields() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["my-log-*"], + "data_stream": { + }, + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "sort.field": [ "host.name", "message", "@timestamp" ], + "logsdb.route_on_sort_fields": true + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "message": { + "type": "keyword" + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + request = new 
Request("POST", "/my-log-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("my-log-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertNull(settings.get("index.mapping.source.mode")); + assertEquals("true", settings.get(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey())); + assertEquals(List.of("host.name", "message"), settings.get(IndexMetadata.INDEX_ROUTING_PATH.getKey())); + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index 977b0e1c57578..f95b64f8c0ec9 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -21,12 +21,15 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import java.io.IOException; import java.time.Instant; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.function.Supplier; @@ -79,6 +82,9 @@ public Settings getAdditionalIndexSettings( final List combinedTemplateMappings ) { Settings.Builder settingsBuilder = null; + boolean isLogsDB = templateIndexMode == IndexMode.LOGSDB; + + // Inject logsdb index mode, based on the logs pattern. if (isLogsdbEnabled && dataStreamName != null && resolveIndexMode(settings.get(IndexSettings.MODE.getKey())) == null @@ -87,8 +93,10 @@ && matchesLogsPattern(dataStreamName)) { if (supportFallbackToStoredSource()) { settings = Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()).put(settings).build(); } + isLogsDB = true; } + // Inject stored source mode if synthetic source if not available per licence. if (supportFallbackToStoredSource()) { // This index name is used when validating component and index templates, we should skip this check in that case. // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) method) @@ -110,14 +118,57 @@ && matchesLogsPattern(dataStreamName)) { settingsBuilder.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString()); } } + + if (isLogsDB) { + // Inject routing path matching sort fields. 
+ if (settings.getAsBoolean(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), false)) { + List sortFields = new ArrayList<>(settings.getAsList(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey())); + sortFields.removeIf(s -> s.equals(DataStreamTimestampFieldMapper.DEFAULT_PATH)); + if (sortFields.size() < 2) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "data stream [%s] in logsdb mode and with [%s] index setting has only %d sort fields " + + "(excluding timestamp), needs at least 2", + dataStreamName, + IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), + sortFields.size() + ) + ); + } + if (settings.hasValue(IndexMetadata.INDEX_ROUTING_PATH.getKey())) { + List routingPaths = settings.getAsList(IndexMetadata.INDEX_ROUTING_PATH.getKey()); + if (routingPaths.equals(sortFields) == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "data stream [%s] in logsdb mode and with [%s] index setting has mismatching sort " + + "and routing fields, [index.routing_path:%s], [index.sort.fields:%s]", + dataStreamName, + IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), + routingPaths, + sortFields + ) + ); + } + } else { + if (settingsBuilder == null) { + settingsBuilder = Settings.builder(); + } + settingsBuilder.putList(INDEX_ROUTING_PATH.getKey(), sortFields).build(); + } + } + } + return settingsBuilder == null ? Settings.EMPTY : settingsBuilder.build(); + } private static boolean matchesLogsPattern(final String name) { return Regex.simpleMatch(LOGS_PATTERN, name); } - private IndexMode resolveIndexMode(final String mode) { + private static IndexMode resolveIndexMode(final String mode) { return mode != null ? Enum.valueOf(IndexMode.class, mode.toUpperCase(Locale.ROOT)) : null; } diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java index de4f0960f50e7..5d3cb7b2a9967 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplateMetadata; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; @@ -18,6 +19,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -36,6 +38,7 @@ import static org.elasticsearch.common.settings.Settings.builder; import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -43,6 +46,7 @@ public class LogsdbIndexModeSettingsProviderTests extends ESTestCase { + private static final String DATA_STREAM_NAME = "logs-app1"; public 
static final String DEFAULT_MAPPING = """ { "_doc": { @@ -385,7 +389,7 @@ private void assertIndexMode(final Settings settings, final String expectedIndex } public void testNewIndexHasSyntheticSourceUsage() throws IOException { - String dataStreamName = "logs-app1"; + String dataStreamName = DATA_STREAM_NAME; String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); Settings settings = Settings.EMPTY; LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); @@ -472,7 +476,7 @@ public void testValidateIndexName() throws IOException { } public void testNewIndexHasSyntheticSourceUsageLogsdbIndex() throws IOException { - String dataStreamName = "logs-app1"; + String dataStreamName = DATA_STREAM_NAME; String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); String mapping = """ { @@ -516,7 +520,7 @@ public void testNewIndexHasSyntheticSourceUsageLogsdbIndex() throws IOException } public void testNewIndexHasSyntheticSourceUsageTimeSeries() throws IOException { - String dataStreamName = "logs-app1"; + String dataStreamName = DATA_STREAM_NAME; String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); String mapping = """ { @@ -557,7 +561,7 @@ public void testNewIndexHasSyntheticSourceUsageTimeSeries() throws IOException { } public void testNewIndexHasSyntheticSourceUsage_invalidSettings() throws IOException { - String dataStreamName = "logs-app1"; + String dataStreamName = DATA_STREAM_NAME; String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); Settings settings = Settings.builder().put("index.soft_deletes.enabled", false).build(); LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); @@ -599,7 +603,7 @@ public void testNewIndexHasSyntheticSourceUsage_invalidSettings() throws IOExcep } public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSource() throws IOException { - String dataStreamName = "logs-app1"; + String dataStreamName = DATA_STREAM_NAME; Metadata.Builder mb = Metadata.builder( DataStreamTestHelper.getClusterStateWithDataStreams( List.of(Tuple.tuple(dataStreamName, 1)), @@ -672,7 +676,7 @@ public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch( LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(true); final Settings settings = Settings.EMPTY; - String dataStreamName = "logs-app1"; + String dataStreamName = DATA_STREAM_NAME; Metadata.Builder mb = Metadata.builder( DataStreamTestHelper.getClusterStateWithDataStreams( List.of(Tuple.tuple(dataStreamName, 1)), @@ -731,4 +735,86 @@ public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch( assertThat(result.size(), equalTo(0)); } + public void testLogsdbRoutingPathOnSortFields() throws Exception { + var settings = Settings.builder() + .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "host,message") + .put(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), true) + .build(); + Settings result = generateLogsdbSettings(settings); + assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), contains("host", "message")); + } + + public void testLogsdbRoutingPathOnSortFieldsFilterTimestamp() throws Exception { + var settings = Settings.builder() + .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "host,message,@timestamp") + .put(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), true) + .build(); + Settings result = generateLogsdbSettings(settings); + assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), 
contains("host", "message")); + } + + public void testLogsdbRoutingPathOnSortSingleField() throws Exception { + var settings = Settings.builder() + .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "host") + .put(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), true) + .build(); + Exception e = expectThrows(IllegalStateException.class, () -> generateLogsdbSettings(settings)); + assertThat( + e.getMessage(), + equalTo( + "data stream [" + + DATA_STREAM_NAME + + "] in logsdb mode and with [index.logsdb.route_on_sort_fields] index setting has only 1 sort fields " + + "(excluding timestamp), needs at least 2" + ) + ); + } + + public void testLogsdbExplicitRoutingPathMatchesSortFields() throws Exception { + var settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB) + .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "host,message,@timestamp") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "host,message") + .put(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), true) + .build(); + Settings result = generateLogsdbSettings(settings); + assertTrue(result.isEmpty()); + } + + public void testLogsdbExplicitRoutingPathDoesNotMatchSortFields() throws Exception { + var settings = Settings.builder() + .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "host,message,@timestamp") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "host,message,foo") + .put(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey(), true) + .build(); + Exception e = expectThrows(IllegalStateException.class, () -> generateLogsdbSettings(settings)); + assertThat( + e.getMessage(), + equalTo( + "data stream [" + + DATA_STREAM_NAME + + "] in logsdb mode and with [index." + + "logsdb.route_on_sort_fields] index setting has mismatching sort " + + "and routing fields, [index.routing_path:[host, message, foo]], [index.sort.fields:[host, message]]" + ) + ); + } + + private Settings generateLogsdbSettings(Settings settings) throws IOException { + Metadata metadata = Metadata.EMPTY_METADATA; + var provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, + Settings.builder().put("cluster.logsdb.enabled", true).build() + ); + var result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(DATA_STREAM_NAME, 0), + DATA_STREAM_NAME, + IndexMode.LOGSDB, + metadata, + Instant.now(), + settings, + List.of() + ); + return builder().put(result).build(); + } } From f3a0fb7078696261da612fe6745027971e8f477e Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 19 Dec 2024 13:45:26 +0100 Subject: [PATCH 35/62] Ensure nested field could be used in lookup joins (#118963) --- .../xpack/esql/CsvTestsDataLoader.java | 6 +++ .../data/languages_nested_fields.csv | 5 ++ .../src/main/resources/lookup-join.csv-spec | 49 +++++++++++++++++++ .../mapping-languages_nested_fields.json | 22 +++++++++ 4 files changed, 82 insertions(+) create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_nested_fields.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages_nested_fields.json diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 1d2de407219ee..7aca63182e2b1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -64,6 +64,11 @@ public class CsvTestsDataLoader { .withSetting("languages_lookup-settings.json"); private static final TestsDataset LANGUAGES_LOOKUP_NON_UNIQUE_KEY = LANGUAGES_LOOKUP.withIndex("languages_lookup_non_unique_key") .withData("languages_non_unique_key.csv"); + private static final TestsDataset LANGUAGES_NESTED_FIELDS = new TestsDataset( + "languages_nested_fields", + "mapping-languages_nested_fields.json", + "languages_nested_fields.csv" + ).withSetting("languages_lookup-settings.json"); private static final TestsDataset ALERTS = new TestsDataset("alerts"); private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs"); private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data"); @@ -116,6 +121,7 @@ public class CsvTestsDataLoader { Map.entry(LANGUAGES.indexName, LANGUAGES), Map.entry(LANGUAGES_LOOKUP.indexName, LANGUAGES_LOOKUP), Map.entry(LANGUAGES_LOOKUP_NON_UNIQUE_KEY.indexName, LANGUAGES_LOOKUP_NON_UNIQUE_KEY), + Map.entry(LANGUAGES_NESTED_FIELDS.indexName, LANGUAGES_NESTED_FIELDS), Map.entry(UL_LOGS.indexName, UL_LOGS), Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), Map.entry(MV_SAMPLE_DATA.indexName, MV_SAMPLE_DATA), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_nested_fields.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_nested_fields.csv new file mode 100644 index 0000000000000..154125cf49304 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_nested_fields.csv @@ -0,0 +1,5 @@ +_id:integer,language.id:integer,language.name:text,language.code:keyword +1,1,English,EN +2,2,French,FR +3,3,Spanish,ES +4,4,German,DE diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 7d4f89ed920a9..618149f2c3dde 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -416,6 +416,55 @@ language_code:integer | language_name:keyword | country:keyword 8 | Mv-Lang2 | Mv-Land2 ; +########################################################################### +# nested field join behavior with languages_nested_fields index +########################################################################### + +joinOnNestedField +required_capability: join_lookup_v8 + +FROM employees +| WHERE 10000 < emp_no AND emp_no < 10006 +| EVAL language.id = emp_no % 10 +| LOOKUP JOIN languages_nested_fields ON language.id +| SORT emp_no +| KEEP emp_no, language.id, language.name +; + +emp_no:integer | language.id:integer | language.name:text +10001 | 1 | English +10002 | 2 | French +10003 | 3 | Spanish +10004 | 4 | German +10005 | 5 | null +; + + +joinOnNestedFieldRow +required_capability: join_lookup_v8 + +ROW language.code = "EN" +| LOOKUP JOIN languages_nested_fields ON language.code +| KEEP language.id, language.code, language.name.keyword +; + +language.id:integer | language.code:keyword | language.name.keyword:keyword +1 | EN | English +; + + +joinOnNestedNestedFieldRow +required_capability: join_lookup_v8 + +ROW language.name.keyword = "English" +| LOOKUP JOIN languages_nested_fields ON language.name.keyword +| KEEP language.id, language.name, language.name.keyword +; + +language.id:integer | language.name:text | language.name.keyword:keyword +1 | 
English | English +; + ############################################### # Tests with clientips_lookup index ############################################### diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages_nested_fields.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages_nested_fields.json new file mode 100644 index 0000000000000..9b46a85ed8d11 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages_nested_fields.json @@ -0,0 +1,22 @@ +{ + "properties" : { + "language" : { + "properties" : { + "id": { + "type": "integer" + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "code": { + "type": "keyword" + } + } + } + } +} From e15b9b995ebacc5d620fe09a8096475796a73375 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Thu, 19 Dec 2024 14:06:08 +0100 Subject: [PATCH 36/62] ESQL: Set LOOKUP JOIN YAML test number of shards to 1 (#119069) To mitigate https://github.com/elastic/elasticsearch/issues/119035 --- .../resources/rest-api-spec/test/esql/190_lookup_join.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml index fdb6746bbeed8..5b39f74de1b9d 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml @@ -12,8 +12,6 @@ setup: indices.create: index: test body: - settings: - number_of_shards: 5 mappings: properties: key: @@ -27,6 +25,7 @@ setup: settings: index: mode: lookup + number_of_shards: 1 mappings: properties: key: From 252f66ddc5ae6e18471b5448cd87f2648ed109d1 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 19 Dec 2024 14:20:51 +0100 Subject: [PATCH 37/62] Handle empty index case in LuceneSyntheticSourceChangesSnapshot (#118996) This handles the case of a synthetic recovery source when the mapping is empty, and adds a test that consistently reproduces the failure in #118955 together with a fix. `MapperService#updateMapping(...)` doesn't set the mapper field if a mapping has no fields, yet that field is what `InternalEngine#newChangesSnapshot(...)` relies on. This happens when the `newMappingMetadata` variable in `MapperService#updateMapping(...)` is `null`, causing an assertion to trip. This change adjusts that assertion to handle an empty index.
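For reference, the adjusted guard amounts to tolerating an empty reader; a minimal sketch condensed from the diff below (names as in the patch):

    // An empty index may never have had MapperService#updateMapping(...) invoked,
    // so mappingLookup can be empty; only require synthetic source once documents exist.
    assert engineSearcher.getDirectoryReader().maxDoc() == 0 || mappingLookup.isSourceSynthetic()
        : "either an empty index or synthetic source must be enabled for proper functionality.";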
Closes #118955 --- muted-tests.yml | 3 -- .../indices.create/20_synthetic_source.yml | 10 +++--- .../indices.recovery/20_synthetic_source.yml | 33 +++++++++++++++++++ .../elasticsearch/index/IndexSettings.java | 3 ++ .../LuceneSyntheticSourceChangesSnapshot.java | 5 +-- 5 files changed, 43 insertions(+), 11 deletions(-) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml diff --git a/muted-tests.yml b/muted-tests.yml index 8cfc7c082473f..5b9997fbcd89f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -288,9 +288,6 @@ tests: - class: org.elasticsearch.cluster.service.MasterServiceTests method: testThreadContext issue: https://github.com/elastic/elasticsearch/issues/118914 -- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT - method: test {yaml=indices.create/20_synthetic_source/create index with use_synthetic_source} - issue: https://github.com/elastic/elasticsearch/issues/118955 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118970 - class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index edb684168278b..5003f6df79a14 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -2036,14 +2036,12 @@ create index with use_synthetic_source: - is_true: test.settings.index.recovery.use_synthetic_source - do: - bulk: + index: index: test + id: 1 refresh: true - body: - - '{ "create": { } }' - - '{ "field": "aaaa" }' - - '{ "create": { } }' - - '{ "field": "bbbb" }' + body: { foo: bar } + - match: { _version: 1 } - do: indices.disk_usage: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml new file mode 100644 index 0000000000000..493b834fc5a90 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml @@ -0,0 +1,33 @@ +--- +test recovery empty index with use_synthetic_source: + - requires: + cluster_features: ["mapper.synthetic_recovery_source"] + reason: requires synthetic recovery source + + - do: + indices.create: + index: test + body: + settings: + index: + number_of_replicas: 0 + recovery: + use_synthetic_source: true + mapping: + source: + mode: synthetic + + - do: + indices.get_settings: {} + - match: { test.settings.index.mapping.source.mode: synthetic} + - is_true: test.settings.index.recovery.use_synthetic_source + + - do: + indices.put_settings: + index: test + body: + index.number_of_replicas: 1 + + - do: + cluster.health: + wait_for_events: languid diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 3d6ec7e2e9242..c8a983a48ff16 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -1049,6 +1049,9 @@ public IndexSettings(final IndexMetadata indexMetadata, 
final Settings nodeSetti recoverySourceEnabled = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(nodeSettings); recoverySourceSyntheticEnabled = scopedSettings.get(RECOVERY_USE_SYNTHETIC_SOURCE_SETTING); if (recoverySourceSyntheticEnabled) { + if (DiscoveryNode.isStateless(settings)) { + throw new IllegalArgumentException("synthetic recovery source is only allowed in stateful"); + } // Verify that all nodes can handle this setting if (version.before(IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY) && version.between( diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java index f21a3c06ab015..08508103181ed 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java @@ -77,12 +77,13 @@ public LuceneSyntheticSourceChangesSnapshot( IndexVersion indexVersionCreated ) throws IOException { super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated); - assert mappingLookup.isSourceSynthetic(); + // a MapperService#updateMapping(...) of empty index may not have been invoked and then mappingLookup is empty + assert engineSearcher.getDirectoryReader().maxDoc() == 0 || mappingLookup.isSourceSynthetic() + : "either an empty index or synthetic source must be enabled for proper functionality."; // ensure we can buffer at least one document this.maxMemorySizeInBytes = maxMemorySizeInBytes > 0 ? maxMemorySizeInBytes : 1; this.sourceLoader = mappingLookup.newSourceLoader(null, SourceFieldMetrics.NOOP); Set storedFields = sourceLoader.requiredStoredFields(); - assert mappingLookup.isSourceSynthetic() : "synthetic source must be enabled for proper functionality."; this.storedFieldLoader = StoredFieldLoader.create(false, storedFields); this.lastSeenSeqNo = fromSeqNo - 1; } From c8d2c7d851d13e1ac0697c66ce4050ff8567d770 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 14:23:13 +0100 Subject: [PATCH 38/62] Fix NodeJoinExecutorTests#testSuccess (#119064) This is related to #119013, we can lower the minimum compatible version to read only compatible version to make the test succeed at all times. 
Closes #119052 --- muted-tests.yml | 3 --- .../cluster/coordination/NodeJoinExecutorTests.java | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 5b9997fbcd89f..6d2b71995e5aa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -298,9 +298,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116777 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 -- class: org.elasticsearch.cluster.coordination.NodeJoinExecutorTests - method: testSuccess - issue: https://github.com/elastic/elasticsearch/issues/119052 # Examples: # diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 492a142492e18..34add82e66557 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -467,7 +467,7 @@ public void testSuccess() { .build(); metaBuilder.put(indexMetadata, false); Metadata metadata = metaBuilder.build(); - NodeJoinExecutor.ensureIndexCompatibility(IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current(), metadata); + NodeJoinExecutor.ensureIndexCompatibility(IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), metadata); } public static Settings.Builder randomCompatibleVersionSettings() { From 22c2db86bc0b8f243852b61ee593524383ae1329 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 14:23:49 +0100 Subject: [PATCH 39/62] Remove UpdateForV9 in PersistedClusterStateServiceTests (#119046) The testOverrideLuceneVersion is renamed to testOverrideNodeVersion. While it updates the user data on the lucene index, it does not override the lucene version. The UpdateForV9 does not require further action. 
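The renamed test exercises the flow sketched here, condensed from the diff that follows (overrideVersion rewrites only the node metadata persisted in the index user data):

    // Override the persisted node version; the Lucene index version itself is untouched.
    BuildVersion overrideVersion = BuildVersion.fromVersionId(Version.V_8_0_0.id);
    NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
    assertEquals(BuildVersion.current(), prevMetadata.nodeVersion());
    PersistedClusterStateService.overrideVersion(overrideVersion, persistedClusterStateService.getDataPaths());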
--- .../gateway/PersistedClusterStateServiceTests.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index 4428a7e078510..7e761cb10d4d6 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -54,7 +54,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -1392,7 +1391,7 @@ public void testLimitsFileCount() throws IOException { } } - public void testOverrideLuceneVersion() throws IOException { + public void testOverrideNodeVersion() throws IOException { try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment); final String clusterUUID = UUIDs.randomBase64UUID(random()); @@ -1415,9 +1414,7 @@ public void testOverrideLuceneVersion() throws IOException { assertThat(clusterState.metadata().version(), equalTo(version)); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) BuildVersion overrideVersion = BuildVersion.fromVersionId(Version.V_8_0_0.id); - NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); assertEquals(BuildVersion.current(), prevMetadata.nodeVersion()); PersistedClusterStateService.overrideVersion(overrideVersion, persistedClusterStateService.getDataPaths()); From 20a3492a762db063d6befd1c3e9f24cff574583f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 20 Dec 2024 00:42:33 +1100 Subject: [PATCH 40/62] Mute org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationIT org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationIT #118414 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 6d2b71995e5aa..833fdcfc93ad7 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -298,6 +298,8 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116777 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 +- class: org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationIT + issue: https://github.com/elastic/elasticsearch/issues/118414 # Examples: # From 0a9b5b71c1086108540e4c39fb7675c383cfb80e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Dec 2024 15:02:45 +0100 Subject: [PATCH 41/62] Restore AnalysisModuleTests#testStandardFilterBWC (#119060) This test has been removed with #112570 but with the coming read-only support for v7 we need it back. 
--- .../indices/analysis/AnalysisModuleTests.java | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 1bcd84aadd6cd..abaab1ac8983b 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.index.IndexService.IndexCreationContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.Analysis; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.CharFilterFactory; @@ -186,6 +187,34 @@ public void testUnderscoreInAnalyzerName() throws IOException { } } + public void testStandardFilterBWC() throws IOException { + // The standard token filter was removed entirely in the 7.x line. However, a + // caching bug meant that it was still possible to create indexes using a standard + // filter until 7.6 + { + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_6_0, IndexVersion.current()); + final Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_standard.tokenizer", "standard") + .put("index.analysis.analyzer.my_standard.filter", "standard") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .build(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(settings)); + assertThat(exc.getMessage(), equalTo("The [standard] token filter has been removed.")); + } + { + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2); + final Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_standard.tokenizer", "standard") + .put("index.analysis.analyzer.my_standard.filter", "standard") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .build(); + getIndexAnalyzers(settings); + assertWarnings("The [standard] token filter is deprecated and will be removed in a future version."); + } + } + + /** + * Tests that plugins can register pre-configured char filters that vary in behavior based on Elasticsearch version, Lucene version, * and that do not vary based on version at all.
From bdd6da59b1d8e72d7188a5233ba06433304d3138 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 20 Dec 2024 01:08:38 +1100 Subject: [PATCH 42/62] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlClientYamlIT org.elasticsearch.xpack.esql.qa.multi_node.EsqlClientYamlIT #119086 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 833fdcfc93ad7..c810575264df5 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -300,6 +300,8 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115727 - class: org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationIT issue: https://github.com/elastic/elasticsearch/issues/118414 +- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlClientYamlIT + issue: https://github.com/elastic/elasticsearch/issues/119086 # Examples: # From 896818c880c55f0ca1a519fe96f78b406a576f95 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 20 Dec 2024 01:28:15 +1100 Subject: [PATCH 43/62] Mute org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilderOverShapeTests testToQuery #119090 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c810575264df5..92850ac343ffe 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -302,6 +302,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118414 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlClientYamlIT issue: https://github.com/elastic/elasticsearch/issues/119086 +- class: org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilderOverShapeTests + method: testToQuery + issue: https://github.com/elastic/elasticsearch/issues/119090 # Examples: # From b2879c32f1b3f7be40c8ee57b255271dfbcc1fcc Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 20 Dec 2024 01:28:29 +1100 Subject: [PATCH 44/62] Mute org.elasticsearch.xpack.spatial.index.query.GeoShapeQueryBuilderGeoShapeTests testToQuery #119091 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 92850ac343ffe..f2294939b7aab 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -305,6 +305,9 @@ tests: - class: org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilderOverShapeTests method: testToQuery issue: https://github.com/elastic/elasticsearch/issues/119090 +- class: org.elasticsearch.xpack.spatial.index.query.GeoShapeQueryBuilderGeoShapeTests + method: testToQuery + issue: https://github.com/elastic/elasticsearch/issues/119091 # Examples: # From 5f293f34f77a8621752f4817609d14aaf916ccb5 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Thu, 19 Dec 2024 16:02:01 +0100 Subject: [PATCH 45/62] ESQL: Add a LicenseAware interface for licensed Nodes (#118931) This adds a new interface that elements requiring a proper license state can implement to enforce the license requirement. It can now be applied to any node or node property. The check still happens in the Verifier, since the plan needs to be analysed first, and the check still only happens if no other verification faults exist already.
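As a usage sketch, any plan node or expression opts in by implementing the interface; the class below is hypothetical (the in-tree adopters in this patch are SpatialAggregateFunction and the LicensedFunction/LicensedLimit test fixtures):

    // Hypothetical plan node gated behind an ENTERPRISE license. Once the plan is
    // analyzed, the Verifier invokes licenseCheck(...) on every node and expression
    // and reports "current license is non-compliant for [...]" on failure.
    public class EnterpriseOnlyLimit extends Limit implements LicenseAware {
        public EnterpriseOnlyLimit(Source source, Expression limit, LogicalPlan child) {
            super(source, limit, child);
        }

        @Override
        public boolean licenseCheck(XPackLicenseState state) {
            return state.isAllowedByLicense(License.OperationMode.ENTERPRISE);
        }

        // replaceChild(...) and info() overrides elided for brevity.
    }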
Fixes #117405 --- docs/changelog/118931.yaml | 6 ++ .../core/expression/function/Function.java | 6 -- .../xpack/esql/LicenseAware.java | 15 ++++ .../xpack/esql/analysis/Verifier.java | 16 ++-- .../aggregate/SpatialAggregateFunction.java | 5 +- .../function/CheckLicenseTests.java | 86 ++++++++++++++----- 6 files changed, 98 insertions(+), 36 deletions(-) create mode 100644 docs/changelog/118931.yaml create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/LicenseAware.java diff --git a/docs/changelog/118931.yaml b/docs/changelog/118931.yaml new file mode 100644 index 0000000000000..81e9b3cb16521 --- /dev/null +++ b/docs/changelog/118931.yaml @@ -0,0 +1,6 @@ +pr: 118931 +summary: Add a `LicenseAware` interface for licensed Nodes +area: ES|QL +type: enhancement +issues: + - 117405 diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java index a1afcdbf1f77c..cad5c631088f2 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.expression.function; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Nullability; @@ -43,11 +42,6 @@ public Nullability nullable() { return Expressions.nullable(children()); } - /** Return true if this function can be executed under the provided {@link XPackLicenseState}, otherwise false.*/ - public boolean checkLicense(XPackLicenseState state) { - return true; - } - @Override public int hashCode() { return Objects.hash(getClass(), children()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/LicenseAware.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/LicenseAware.java new file mode 100644 index 0000000000000..04fcdb8a7c8e1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/LicenseAware.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql; + +import org.elasticsearch.license.XPackLicenseState; + +public interface LicenseAware { + /** Return true if the implementer can be executed under the provided {@link XPackLicenseState}, otherwise false.*/ + boolean licenseCheck(XPackLicenseState state); +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 93e9d59ed8c6e..e146b517ad1c8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.esql.LicenseAware; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; @@ -26,6 +27,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.tree.Node; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; @@ -209,7 +211,7 @@ else if (p instanceof Lookup lookup) { checkRemoteEnrich(plan, failures); if (failures.isEmpty()) { - checkLicense(plan, licenseState, failures); + licenseCheck(plan, failures); } // gather metrics @@ -587,11 +589,15 @@ private static void checkBinaryComparison(LogicalPlan p, Set failures) }); } - private void checkLicense(LogicalPlan plan, XPackLicenseState licenseState, Set failures) { - plan.forEachExpressionDown(Function.class, p -> { - if (p.checkLicense(licenseState) == false) { - failures.add(new Failure(p, "current license is non-compliant for function [" + p.sourceText() + "]")); + private void licenseCheck(LogicalPlan plan, Set failures) { + Consumer> licenseCheck = n -> { + if (n instanceof LicenseAware la && la.licenseCheck(licenseState) == false) { + failures.add(fail(n, "current license is non-compliant for [{}]", n.sourceText())); } + }; + plan.forEachDown(p -> { + licenseCheck.accept(p); + p.forEachExpression(Expression.class, licenseCheck); }); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java index 35f99e4b648df..f68f9f2487884 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java @@ -11,6 +11,7 @@ import org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference; import org.elasticsearch.license.License; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.esql.LicenseAware; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -24,7 +25,7 @@ * The AggregateMapper 
class will generate multiple aggregation functions for each combination, allowing the planner to * select the best one. */ -public abstract class SpatialAggregateFunction extends AggregateFunction { +public abstract class SpatialAggregateFunction extends AggregateFunction implements LicenseAware { protected final FieldExtractPreference fieldExtractPreference; protected SpatialAggregateFunction(Source source, Expression field, Expression filter, FieldExtractPreference fieldExtractPreference) { @@ -41,7 +42,7 @@ protected SpatialAggregateFunction(StreamInput in, FieldExtractPreference fieldE public abstract SpatialAggregateFunction withDocValues(); @Override - public boolean checkLicense(XPackLicenseState state) { + public boolean licenseCheck(XPackLicenseState state) { return switch (field().dataType()) { case GEO_SHAPE, CARTESIAN_SHAPE -> state.isAllowedByLicense(License.OperationMode.PLATINUM); default -> true; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java index 98f36d339976c..19af9892015b2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.LicenseAware; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; @@ -25,10 +26,12 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.stats.Metrics; import java.util.List; +import java.util.Objects; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyzerDefaultMapping; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.defaultEnrichResolution; @@ -44,25 +47,28 @@ public void testLicense() { final LicensedFeature functionLicenseFeature = random().nextBoolean() ? 
LicensedFeature.momentary("test", "license", functionLicense) : LicensedFeature.persistent("test", "license", functionLicense); - final EsqlFunctionRegistry.FunctionBuilder builder = (source, expression, cfg) -> { - final LicensedFunction licensedFunction = new LicensedFunction(source); - licensedFunction.setLicensedFeature(functionLicenseFeature); - return licensedFunction; - }; for (License.OperationMode operationMode : License.OperationMode.values()) { if (License.OperationMode.TRIAL != operationMode && License.OperationMode.compare(operationMode, functionLicense) < 0) { // non-compliant license - final VerificationException ex = expectThrows(VerificationException.class, () -> analyze(builder, operationMode)); - assertThat(ex.getMessage(), containsString("current license is non-compliant for function [license()]")); + final VerificationException ex = expectThrows( + VerificationException.class, + () -> analyze(operationMode, functionLicenseFeature) + ); + assertThat(ex.getMessage(), containsString("current license is non-compliant for [license()]")); + assertThat(ex.getMessage(), containsString("current license is non-compliant for [LicensedLimit]")); } else { // compliant license - assertNotNull(analyze(builder, operationMode)); + assertNotNull(analyze(operationMode, functionLicenseFeature)); } } } } - private LogicalPlan analyze(EsqlFunctionRegistry.FunctionBuilder builder, License.OperationMode operationMode) { + private LogicalPlan analyze(License.OperationMode operationMode, LicensedFeature functionLicenseFeature) { + final EsqlFunctionRegistry.FunctionBuilder builder = (source, expression, cfg) -> new LicensedFunction( + source, + functionLicenseFeature + ); final FunctionDefinition def = EsqlFunctionRegistry.def(LicensedFunction.class, builder, "license"); final EsqlFunctionRegistry registry = new EsqlFunctionRegistry(def) { @Override @@ -70,7 +76,13 @@ public EsqlFunctionRegistry snapshotRegistry() { return this; } }; - return analyzer(registry, operationMode).analyze(parser.createStatement(esql)); + + var plan = parser.createStatement(esql); + plan = plan.transformDown( + Limit.class, + l -> Objects.equals(l.limit().fold(), 10) ? new LicensedLimit(l.source(), l.limit(), l.child(), functionLicenseFeature) : l + ); + return analyzer(registry, operationMode).analyze(plan); } private static Analyzer analyzer(EsqlFunctionRegistry registry, License.OperationMode operationMode) { @@ -88,25 +100,18 @@ private static XPackLicenseState getLicenseState(License.OperationMode operation // It needs to be public because we run validation on it via reflection in org.elasticsearch.xpack.esql.tree.EsqlNodeSubclassTests. // This test prevents to add the license as constructor parameter too. 
- public static class LicensedFunction extends Function { + public static class LicensedFunction extends Function implements LicenseAware { - private LicensedFeature licensedFeature; + private final LicensedFeature licensedFeature; - public LicensedFunction(Source source) { + public LicensedFunction(Source source, LicensedFeature licensedFeature) { super(source, List.of()); - } - - void setLicensedFeature(LicensedFeature licensedFeature) { this.licensedFeature = licensedFeature; } @Override - public boolean checkLicense(XPackLicenseState state) { - if (licensedFeature instanceof LicensedFeature.Momentary momentary) { - return momentary.check(state); - } else { - return licensedFeature.checkWithoutTracking(state); - } + public boolean licenseCheck(XPackLicenseState state) { + return checkLicense(state, licensedFeature); } @Override @@ -121,7 +126,7 @@ public Expression replaceChildren(List newChildren) { @Override protected NodeInfo info() { - return NodeInfo.create(this); + return NodeInfo.create(this, LicensedFunction::new, licensedFeature); } @Override @@ -135,4 +140,39 @@ public void writeTo(StreamOutput out) { } } + public static class LicensedLimit extends Limit implements LicenseAware { + + private final LicensedFeature licensedFeature; + + public LicensedLimit(Source source, Expression limit, LogicalPlan child, LicensedFeature licensedFeature) { + super(source, limit, child); + this.licensedFeature = licensedFeature; + } + + @Override + public boolean licenseCheck(XPackLicenseState state) { + return checkLicense(state, licensedFeature); + } + + @Override + public Limit replaceChild(LogicalPlan newChild) { + return new LicensedLimit(source(), limit(), newChild, licensedFeature); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LicensedLimit::new, limit(), child(), licensedFeature); + } + + @Override + public String sourceText() { + return "LicensedLimit"; + } + } + + private static boolean checkLicense(XPackLicenseState state, LicensedFeature licensedFeature) { + return licensedFeature instanceof LicensedFeature.Momentary momentary + ? momentary.check(state) + : licensedFeature.checkWithoutTracking(state); + } } From e43cdf744a90d72aee52f827360210c133f9344b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Dec 2024 16:02:23 +0100 Subject: [PATCH 46/62] Restore v7 support in DateFieldMapper and IPFieldMapper (#119050) This restores deprecation warnings previously removed with #113023 and restores some additional tests due to N-2 read-only support. 
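The restored leniency has the same shape in both mappers; roughly, condensed from the DateFieldMapper hunk below (deprecation message elided):

    // On pre-8.0 indices an unparsable null_value warns and is ignored instead of failing hard.
    try {
        return fieldType.parse(nullValue.getValue());
    } catch (Exception e) {
        if (indexCreatedVersion.onOrAfter(IndexVersions.V_8_0_0)) {
            throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e);
        }
        DEPRECATION_LOGGER.warn(DeprecationCategory.MAPPINGS, "date_mapper_null_field", /* warning text */ "...");
        return null; // [null_value] is ignored on legacy indices
    }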
--- .../index/mapper/DateFieldMapper.java | 19 ++++++- .../index/mapper/IpFieldMapper.java | 20 ++++++- .../index/mapper/DateFieldMapperTests.java | 54 +++++++++++++++++++ .../index/mapper/IpFieldMapperTests.java | 7 +++ 4 files changed, 98 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 87e4ce5f90479..39744cbd39f7c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -25,6 +25,8 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; @@ -34,6 +36,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; @@ -76,6 +79,7 @@ /** A {@link FieldMapper} for dates. */ public final class DateFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(DateFieldMapper.class); private static final Logger logger = LogManager.getLogger(DateFieldMapper.class); public static final String CONTENT_TYPE = "date"; @@ -342,7 +346,20 @@ private Long parseNullValue(DateFieldType fieldType) { try { return fieldType.parse(nullValue.getValue()); } catch (Exception e) { - throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); + if (indexCreatedVersion.onOrAfter(IndexVersions.V_8_0_0)) { + throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); + } else { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + "date_mapper_null_field", + "Error parsing [" + + nullValue.getValue() + + "] as date in [null_value] on field [" + + leafName() + + "]); [null_value] will be ignored" + ); + return null; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 09f44f139d8bc..2f64955b48627 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -23,11 +23,14 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.fielddata.FieldDataContext; import 
org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -59,6 +62,8 @@ */ public class IpFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IpFieldMapper.class); + public static final String CONTENT_TYPE = "ip"; private static IpFieldMapper toType(FieldMapper in) { @@ -129,7 +134,20 @@ private InetAddress parseNullValue() { try { return InetAddresses.forString(nullValueAsString); } catch (Exception e) { - throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); + if (indexCreatedVersion.onOrAfter(IndexVersions.V_8_0_0)) { + throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); + } else { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + "ip_mapper_null_field", + "Error parsing [" + + nullValue.getValue() + + "] as IP in [null_value] on field [" + + leafName() + + "]); [null_value] will be ignored" + ); + return null; + } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 54f6143d5cb30..fcadc7b238a43 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -20,7 +20,9 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.script.DateFieldScript; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -44,6 +46,7 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; public class DateFieldMapperTests extends MapperTestCase { @@ -245,6 +248,10 @@ public void testBadNullValue() throws IOException { + "failed to parse date field [foo] with format [strict_date_optional_time||epoch_millis]" ) ); + + createDocumentMapper(IndexVersions.V_7_9_0, fieldMapping(b -> b.field("type", "date").field("null_value", "foo"))); + + assertWarnings("Error parsing [foo] as date in [null_value] on field [field]); [null_value] will be ignored"); } public void testNullConfigValuesFail() { @@ -757,4 +764,51 @@ public void testLegacyField() throws Exception { assertNotEquals(DEFAULT_DATE_TIME_FORMATTER, ((DateFieldType) service.fieldType("mydate")).dateTimeFormatter); } + public void testLegacyDateFormatName() { + DateFieldMapper.Builder builder = new DateFieldMapper.Builder( + "format", + DateFieldMapper.Resolution.MILLISECONDS, + null, + mock(ScriptService.class), + true, + // BWC compatible index, e.g 7.x + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_0_0, + IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0) + ) + ); + + // Check that we allow the use of camel case date formats on 7.x indices + @SuppressWarnings("unchecked") + FieldMapper.Parameter formatParam = (FieldMapper.Parameter) builder.getParameters()[3]; + formatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime"); + builder.buildFormatter(); // shouldn't throw 
exception + + formatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime||strictDateOptionalTimeNanos"); + builder.buildFormatter(); // shouldn't throw exception + + DateFieldMapper.Builder newFieldBuilder = new DateFieldMapper.Builder( + "format", + DateFieldMapper.Resolution.MILLISECONDS, + null, + mock(ScriptService.class), + true, + IndexVersion.current() + ); + + @SuppressWarnings("unchecked") + final FieldMapper.Parameter newFormatParam = (FieldMapper.Parameter) newFieldBuilder.getParameters()[3]; + + // Check that we don't allow the use of camel case date formats on 8.x indices + assertEquals( + "Error parsing [format] on field [format]: Invalid format: [strictDateOptionalTime]: Unknown pattern letter: t", + expectThrows(IllegalArgumentException.class, () -> { + newFormatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime"); + assertEquals("strictDateOptionalTime", newFormatParam.getValue()); + newFieldBuilder.buildFormatter(); + }).getMessage() + ); + + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index 86c1157259790..1b8a2d68cd930 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.script.IpFieldScript; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; @@ -208,6 +209,12 @@ public void testNullValue() throws IOException { e.getMessage(), "Failed to parse mapping: Error parsing [null_value] on field [field]: ':1' is not an IP string literal." 
); + + createDocumentMapper(IndexVersions.V_7_9_0, fieldMapping(b -> { + b.field("type", "ip"); + b.field("null_value", ":1"); + })); + assertWarnings("Error parsing [:1] as IP in [null_value] on field [field]); [null_value] will be ignored"); } public void testDimension() throws IOException { From 696ee806e7daa225ceecc30089757f8824d4c3ed Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Thu, 19 Dec 2024 10:09:14 -0500 Subject: [PATCH 47/62] Revise content to match new troubleshooting guidelines (#118033) * Revise to match new guidelines * Address review suggestions and comments * Apply suggestions from review Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> * Apply suggestions from review Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> * Apply suggestions from review Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> * Apply suggestions from review --------- Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> --- .../common-issues/task-queue-backlog.asciidoc | 130 ++++++++++++------ 1 file changed, 88 insertions(+), 42 deletions(-) diff --git a/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc b/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc index 5aa6a0129c2d4..f233f22cb3fbe 100644 --- a/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc +++ b/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc @@ -1,103 +1,149 @@ [[task-queue-backlog]] -=== Task queue backlog +=== Backlogged task queue -A backlogged task queue can prevent tasks from completing and put the cluster -into an unhealthy state. Resource constraints, a large number of tasks being -triggered at once, and long running tasks can all contribute to a backlogged -task queue. +******************************* +*Product:* Elasticsearch + +*Deployment type:* Elastic Cloud Enterprise, Elastic Cloud Hosted, Elastic Cloud on Kubernetes, Elastic Self-Managed + +*Versions:* All +******************************* + +A backlogged task queue can prevent tasks from completing and lead to an +unhealthy cluster state. Contributing factors include resource constraints, +a large number of tasks triggered at once, and long-running tasks. [discrete] [[diagnose-task-queue-backlog]] -==== Diagnose a task queue backlog +==== Diagnose a backlogged task queue + +To identify the cause of the backlog, try these diagnostic actions. -**Check the thread pool status** +* <> +* <> +* <> +* <> + +[discrete] +[[diagnose-task-queue-thread-pool]] +===== Check the thread pool status A <> can result in <>. -Thread pool depletion might be restricted to a specific <>. If <> is occuring, one node might experience depletion faster than other nodes, leading to performance issues and a growing task backlog. - -You can use the <> to see the number of -active threads in each thread pool and how many tasks are queued, how many -have been rejected, and how many have completed. +Use the <> to monitor +active threads, queued tasks, rejections, and completed tasks: [source,console] ---- GET /_cat/thread_pool?v&s=t,n&h=type,name,node_name,active,queue,rejected,completed ---- -The `active` and `queue` statistics are instantaneous while the `rejected` and -`completed` statistics are cumulative from node startup. +* Look for high `active` and `queue` metrics, which indicate potential bottlenecks +and opportunities to <>. 
+* Determine whether thread pool issues are specific to a <>. +* Check whether a specific node's thread pool is depleting faster than others. This +might indicate <>. -**Inspect the hot threads on each node** +[discrete] +[[diagnose-task-queue-hot-thread]] +===== Inspect hot threads on each node -If a particular thread pool queue is backed up, you can periodically poll the -<> API to determine if the thread -has sufficient resources to progress and gauge how quickly it is progressing. +If a particular thread pool queue is backed up, periodically poll the +<> to gauge the thread's +progression and ensure it has sufficient resources: [source,console] ---- GET /_nodes/hot_threads ---- -**Look for long running node tasks** +Although the hot threads API response does not list the specific tasks running on a thread, +it provides a summary of the thread's activities. You can correlate a hot threads response +with a <> to identify any overlap with specific tasks. For +example, if the hot threads response indicates the thread is `performing a search query`, you can +<> using the task management API. + +[discrete] +[[diagnose-task-queue-long-running-node-tasks]] +===== Identify long-running node tasks -Long-running tasks can also cause a backlog. You can use the <> API to get information about the node tasks that are running. -Check the `running_time_in_nanos` to identify tasks that are taking an -excessive amount of time to complete. +Long-running tasks can also cause a backlog. Use the <> to check for excessive `running_time_in_nanos` values: [source,console] ---- GET /_tasks?pretty=true&human=true&detailed=true ---- -If a particular `action` is suspected, you can filter the tasks further. The most common long-running tasks are <>- or search-related. +You can filter on a specific `action`, such as <> or search-related tasks. +These tend to be long-running. -* Filter for <> actions: +* Filter on <> actions: + [source,console] ---- GET /_tasks?human&detailed&actions=indices:data/write/bulk ---- -* Filter for search actions: +* Filter on search actions: + [source,console] ---- GET /_tasks?human&detailed&actions=indices:data/read/search ---- The API response may contain additional task columns, including `description` and `header`, which provide the task parameters, target, and requestor. You can use this information to perform further diagnosis. -**Look for long running cluster tasks** +Long-running tasks might need to be <>. + +[discrete] +[[diagnose-task-queue-long-running-cluster-tasks]] +===== Look for long-running cluster tasks -A task backlog might also appear as a delay in synchronizing the cluster state. You -can use the <> to get information -about the pending cluster state sync tasks that are running. +Use the <> to identify delays +in cluster state synchronization: [source,console] ---- GET /_cluster/pending_tasks ---- -Check the `timeInQueue` to identify tasks that are taking an excessive amount -of time to complete. +Tasks with a high `timeInQueue` value are likely contributing to the backlog and might +need to be <>. [discrete] [[resolve-task-queue-backlog]] -==== Resolve a task queue backlog +==== Recommendations + +After identifying problematic threads and tasks, resolve the issue by increasing resources or canceling tasks. -**Increase available resources** +[discrete] +[[resolve-task-queue-backlog-resources]] +===== Increase available resources -If tasks are progressing slowly and the queue is backing up, -you might need to take steps to <>.
+If tasks are progressing slowly, try <>. -In some cases, increasing the thread pool size might help. -For example, the `force_merge` thread pool defaults to a single thread. +In some cases, you might need to increase the thread pool size. For example, the `force_merge` thread pool defaults to a single thread. Increasing the size to 2 might help reduce a backlog of force merge requests. -**Cancel stuck tasks** +[discrete] +[[resolve-task-queue-backlog-stuck-tasks]] +===== Cancel stuck tasks + +If an active task's <> shows no progress, consider <>. + +[discrete] +[[resolve-task-queue-backlog-hotspotting]] +===== Address hot spotting + +If a specific node's thread pool is depleting faster than others, try addressing +uneven node resource utilization, also known as hot spotting. +For details on actions you can take, such as rebalancing shards, see <>. + +[discrete] +==== Resources + +Related symptoms: + +* <> +* <> +* <> -If you find the active task's hot thread isn't progressing and there's a backlog, -consider canceling the task. \ No newline at end of file +// TODO add link to standard Additional resources when that topic exists From c808137709bf76a07986d85cffa4dd9107d5327c Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 19 Dec 2024 16:10:44 +0100 Subject: [PATCH 48/62] Cleanup gradle init script used in ci (#119015) --- .ci/init.gradle | 101 +----------------------------------------------- 1 file changed, 2 insertions(+), 99 deletions(-) diff --git a/.ci/init.gradle b/.ci/init.gradle index 3e1f23804cf98..15d63f8ca7d20 100644 --- a/.ci/init.gradle +++ b/.ci/init.gradle @@ -1,95 +1,3 @@ -import com.bettercloud.vault.VaultConfig -import com.bettercloud.vault.Vault - -initscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.bettercloud:vault-java-driver:4.1.0' - } -} - -boolean USE_ARTIFACTORY = false - -if (System.getenv('VAULT_ADDR') == null) { - // When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up - if (System.getenv('CI') == null) { - return - } - - throw new GradleException("You must set the VAULT_ADDR environment variable to use this init script.") -} - -if (System.getenv('VAULT_ROLE_ID') == null && System.getenv('VAULT_SECRET_ID') == null && System.getenv('VAULT_TOKEN') == null) { - // When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up - if (System.getenv('CI') == null) { - return - } - - throw new GradleException("You must set either the VAULT_ROLE_ID and VAULT_SECRET_ID environment variables, " + - "or the VAULT_TOKEN environment variable to use this init script.") -} - -final String vaultPathPrefix = System.getenv('VAULT_ADDR') ==~ /.+vault-ci.+\.dev.*/ ? 
"secret/ci/elastic-elasticsearch/migrated" : "secret/elasticsearch-ci" - -final String vaultToken = System.getenv('VAULT_TOKEN') ?: new Vault( - new VaultConfig() - .address(System.env.VAULT_ADDR) - .engineVersion(1) - .build() -) - .withRetries(5, 1000) - .auth() - .loginByAppRole("approle", System.env.VAULT_ROLE_ID, System.env.VAULT_SECRET_ID) - .getAuthClientToken() - -final Vault vault = new Vault( - new VaultConfig() - .address(System.env.VAULT_ADDR) - .engineVersion(1) - .token(vaultToken) - .build() -) - .withRetries(5, 1000) - - -if (USE_ARTIFACTORY) { - final Map artifactoryCredentials = vault.logical() - .read("${vaultPathPrefix}/artifactory.elstc.co") - .getData() - logger.info("Using elastic artifactory repos") - Closure configCache = { - return { - name "artifactory-gradle-release" - url "https://artifactory.elstc.co/artifactory/gradle-release" - credentials { - username artifactoryCredentials.get("username") - password artifactoryCredentials.get("token") - } - } - } - settingsEvaluated { settings -> - settings.pluginManagement { - repositories { - maven configCache() - } - } - } - projectsLoaded { - allprojects { - buildscript { - repositories { - maven configCache() - } - } - repositories { - maven configCache() - } - } - } -} - gradle.settingsEvaluated { settings -> settings.pluginManager.withPlugin("com.gradle.develocity") { settings.develocity { @@ -98,14 +6,10 @@ gradle.settingsEvaluated { settings -> } } - final String buildCacheUrl = System.getProperty('org.elasticsearch.build.cache.url') final boolean buildCachePush = Boolean.valueOf(System.getProperty('org.elasticsearch.build.cache.push', 'false')) if (buildCacheUrl) { - final Map buildCacheCredentials = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ? [:] : vault.logical() - .read("${vaultPathPrefix}/gradle-build-cache") - .getData() gradle.settingsEvaluated { settings -> settings.buildCache { local { @@ -116,11 +20,10 @@ if (buildCacheUrl) { url = buildCacheUrl push = buildCachePush credentials { - username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ?: buildCacheCredentials.get("username") - password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") ?: buildCacheCredentials.get("password") + username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") + password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") } } } } } - From 84f233a59cef93ae6c804bcfa7f5c6a6138d583e Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 19 Dec 2024 16:18:59 +0100 Subject: [PATCH 49/62] Allow archive and searchable snapshots indices in N-2 version (#118941) This change (along with the required change #118923 for 8.18) relaxes the index compatibility version checks to allow archive and searchable snapshot indices in version N-2 to exist on a 9.x cluster. 
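Conceptually, index-version verification now distinguishes full support (N and N-1) from read-only support (N-2). A condensed sketch of the decision, drawn from the IndexMetadataVerifier change below (illustration only — the real isReadOnlySupportedVersion additionally throws if the required write block is missing rather than just returning false):

    // N and N-1 indices: fully supported for reads and writes
    if (indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion)) {
        return true;
    }
    // N-2 indices: only archive and searchable snapshot indices, and only as read-only
    if (indexMetadata.getCompatibilityVersion().onOrAfter(minReadOnlyIndexCompatibilityVersion)
        && (indexMetadata.isSearchableSnapshot() || indexMetadata.getCreationVersion().isLegacyIndexVersion())) {
        // such indices must already carry index.blocks.write=true to be accepted
        return IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexMetadata.getSettings());
    }
    return false;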
Relates ES-10274 --- docs/changelog/118941.yaml | 5 + ...tractLuceneIndexCompatibilityTestCase.java | 57 ++++++- .../lucene/LuceneCompatibilityIT.java | 58 ++++--- .../SearchableSnapshotCompatibilityIT.java | 120 ++++++++------ .../coordination/NodeJoinExecutor.java | 42 +++-- .../metadata/IndexMetadataVerifier.java | 99 +++++++++--- .../metadata/MetadataIndexStateService.java | 7 +- .../elasticsearch/common/lucene/Lucene.java | 43 ++++-- .../gateway/GatewayMetaState.java | 6 +- .../gateway/LocalAllocateDangledIndices.java | 7 +- .../index/engine/ReadOnlyEngine.java | 5 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../org/elasticsearch/index/store/Store.java | 6 +- .../snapshots/RestoreService.java | 9 +- .../coordination/NodeJoinExecutorTests.java | 146 +++++++++++++++++- .../metadata/IndexMetadataVerifierTests.java | 82 +++++++++- .../gateway/GatewayMetaStateTests.java | 6 +- .../indices/cluster/ClusterStateChanges.java | 6 +- 18 files changed, 568 insertions(+), 138 deletions(-) create mode 100644 docs/changelog/118941.yaml diff --git a/docs/changelog/118941.yaml b/docs/changelog/118941.yaml new file mode 100644 index 0000000000000..4f0099bb32704 --- /dev/null +++ b/docs/changelog/118941.yaml @@ -0,0 +1,5 @@ +pr: 118941 +summary: Allow archive and searchable snapshots indices in N-2 version +area: Recovery +type: enhancement +issues: [] diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java index c42e879f84892..1865da06e20c5 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java @@ -15,6 +15,8 @@ import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -28,12 +30,15 @@ import java.util.Comparator; import java.util.Locale; +import java.util.stream.IntStream; import java.util.stream.Stream; import static org.elasticsearch.test.cluster.util.Version.CURRENT; import static org.elasticsearch.test.cluster.util.Version.fromString; import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; /** @@ -113,6 +118,12 @@ protected String suffix(String name) { return name + '-' + getTestName().split(" ")[0].toLowerCase(Locale.ROOT); } + protected Settings repositorySettings() { + return Settings.builder() + .put("location", REPOSITORY_PATH.getRoot().toPath().resolve(suffix("location")).toFile().getPath()) + .build(); + } + protected static Version clusterVersion() throws Exception { var response = assertOK(client().performRequest(new Request("GET", "/"))); var responseBody = createFromResponse(response); @@ -121,12 +132,56 @@ protected static Version clusterVersion() throws Exception { return version; } - protected static Version 
indexLuceneVersion(String indexName) throws Exception { + protected static Version indexVersion(String indexName) throws Exception { var response = assertOK(client().performRequest(new Request("GET", "/" + indexName + "/_settings"))); int id = Integer.parseInt(createFromResponse(response).evaluate(indexName + ".settings.index.version.created")); return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100)); } + protected static void indexDocs(String indexName, int numDocs) throws Exception { + var request = new Request("POST", "/_bulk"); + var docs = new StringBuilder(); + IntStream.range(0, numDocs).forEach(n -> docs.append(Strings.format(""" + {"index":{"_id":"%s","_index":"%s"}} + {"test":"test"} + """, n, indexName))); + request.setJsonEntity(docs.toString()); + var response = assertOK(client().performRequest(request)); + assertThat(entityAsMap(response).get("errors"), allOf(notNullValue(), is(false))); + } + + protected static void mountIndex(String repository, String snapshot, String indexName, boolean partial, String renamedIndexName) + throws Exception { + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_mount"); + request.addParameter("wait_for_completion", "true"); + var storage = partial ? "shared_cache" : "full_copy"; + request.addParameter("storage", storage); + request.setJsonEntity(Strings.format(""" + { + "index": "%s", + "renamed_index": "%s" + }""", indexName, renamedIndexName)); + var responseBody = createFromResponse(client().performRequest(request)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.successful"))); + assertThat(responseBody.evaluate("snapshot.shards.failed"), equalTo(0)); + } + + protected static void restoreIndex(String repository, String snapshot, String indexName, String renamedIndexName) throws Exception { + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity(org.elasticsearch.common.Strings.format(""" + { + "indices": "%s", + "include_global_state": false, + "rename_pattern": "(.+)", + "rename_replacement": "%s", + "include_aliases": false + }""", indexName, renamedIndexName)); + var responseBody = createFromResponse(client().performRequest(request)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); + assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + } + /** * Execute the test suite with the parameters provided by the {@link #parameters()} in version order. 
*/ diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java index d6dd949b843d6..655e30f069f18 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java @@ -10,20 +10,18 @@ package org.elasticsearch.lucene; import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.cluster.util.Version; -import java.util.stream.IntStream; - -import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { @@ -35,22 +33,19 @@ public LuceneCompatibilityIT(Version version) { super(version); } + /** + * Creates an index and a snapshot on N-2, then restores the snapshot on N. + */ public void testRestoreIndex() throws Exception { final String repository = suffix("repository"); final String snapshot = suffix("snapshot"); final String index = suffix("index"); final int numDocs = 1234; - logger.debug("--> registering repository [{}]", repository); - registerRepository( - client(), - repository, - FsRepository.TYPE, - true, - Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build() - ); - if (VERSION_MINUS_2.equals(clusterVersion())) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + logger.debug("--> creating index [{}]", index); createIndex( client(), @@ -63,17 +58,7 @@ public void testRestoreIndex() throws Exception { ); logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); - final var bulks = new StringBuilder(); - IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format(""" - {"index":{"_id":"%s","_index":"%s"}} - {"test":"test"} - """, n, index))); - - var bulkRequest = new Request("POST", "/_bulk"); - bulkRequest.setJsonEntity(bulks.toString()); - var bulkResponse = client().performRequest(bulkRequest); - assertOK(bulkResponse); - assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false))); + indexDocs(index, numDocs); logger.debug("--> creating snapshot [{}]", snapshot); createSnapshot(client(), repository, snapshot, true); @@ -83,7 +68,7 @@ public void testRestoreIndex() throws Exception { if (VERSION_MINUS_1.equals(clusterVersion())) { ensureGreen(index); - assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2)); + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); assertDocCount(client(), index, numDocs); logger.debug("--> deleting index [{}]", index); @@ -93,9 +78,9 @@ public void testRestoreIndex() throws Exception { if (VERSION_CURRENT.equals(clusterVersion())) { var restoredIndex = suffix("index-restored"); 
- logger.debug("--> restoring index [{}] as archive [{}]", index, restoredIndex); + logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); - // Restoring the archive will fail as Elasticsearch does not support reading N-2 yet + // Restoring the index will fail as Elasticsearch does not support reading N-2 yet var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); request.addParameter("wait_for_completion", "true"); request.setJsonEntity(Strings.format(""" @@ -106,9 +91,20 @@ public void testRestoreIndex() throws Exception { "rename_replacement": "%s", "include_aliases": false }""", index, restoredIndex)); - var responseBody = createFromResponse(client().performRequest(request)); - assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); - assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + + var responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); + assertThat( + responseException.getMessage(), + allOf( + containsString("cannot restore index [[" + index), + containsString("because it cannot be upgraded"), + containsString("has current compatibility version [" + VERSION_MINUS_2 + '-' + VERSION_MINUS_1.getMajor() + ".0.0]"), + containsString("but the minimum compatible version is [" + VERSION_MINUS_1.getMajor() + ".0.0]."), + containsString("It should be re-indexed in Elasticsearch " + VERSION_MINUS_1.getMajor() + ".x"), + containsString("before upgrading to " + VERSION_CURRENT) + ) + ); } } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java index 4f348b7fb122f..d5db17f257b0c 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java @@ -9,21 +9,13 @@ package org.elasticsearch.lucene; -import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.test.cluster.util.Version; -import java.util.stream.IntStream; - -import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { @@ -37,24 +29,19 @@ public SearchableSnapshotCompatibilityIT(Version version) { super(version); } - // TODO Add a test to mount the N-2 index on N-1 and then search it on N - + /** + * Creates an index and a snapshot on N-2, then mounts the snapshot on N. 
+ */ public void testSearchableSnapshot() throws Exception { final String repository = suffix("repository"); final String snapshot = suffix("snapshot"); final String index = suffix("index"); final int numDocs = 1234; - logger.debug("--> registering repository [{}]", repository); - registerRepository( - client(), - repository, - FsRepository.TYPE, - true, - Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build() - ); - if (VERSION_MINUS_2.equals(clusterVersion())) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + logger.debug("--> creating index [{}]", index); createIndex( client(), @@ -67,17 +54,7 @@ public void testSearchableSnapshot() throws Exception { ); logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); - final var bulks = new StringBuilder(); - IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format(""" - {"index":{"_id":"%s","_index":"%s"}} - {"test":"test"} - """, n, index))); - - var bulkRequest = new Request("POST", "/_bulk"); - bulkRequest.setJsonEntity(bulks.toString()); - var bulkResponse = client().performRequest(bulkRequest); - assertOK(bulkResponse); - assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false))); + indexDocs(index, numDocs); logger.debug("--> creating snapshot [{}]", snapshot); createSnapshot(client(), repository, snapshot, true); @@ -87,7 +64,7 @@ public void testSearchableSnapshot() throws Exception { if (VERSION_MINUS_1.equals(clusterVersion())) { ensureGreen(index); - assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2)); + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); assertDocCount(client(), index, numDocs); logger.debug("--> deleting index [{}]", index); @@ -98,20 +75,75 @@ public void testSearchableSnapshot() throws Exception { if (VERSION_CURRENT.equals(clusterVersion())) { var mountedIndex = suffix("index-mounted"); logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + + ensureGreen(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(mountedIndex); + } + } + + /** + * Creates an index and a snapshot on N-2, mounts the snapshot on N -1 and then upgrades to N. 
+ */ + public void testSearchableSnapshotUpgrade() throws Exception { + final String mountedIndex = suffix("index-mounted"); + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 4321; + + if (VERSION_MINUS_2.equals(clusterVersion())) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + if (VERSION_MINUS_1.equals(clusterVersion())) { + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + + ensureGreen(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + return; + } + + if (VERSION_CURRENT.equals(clusterVersion())) { + ensureGreen(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); - // Mounting the index will fail as Elasticsearch does not support reading N-2 yet - var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_mount"); - request.addParameter("wait_for_completion", "true"); - var storage = randomBoolean() ? 
"shared_cache" : "full_copy"; - request.addParameter("storage", storage); - request.setJsonEntity(Strings.format(""" - { - "index": "%s", - "renamed_index": "%s" - }""", index, mountedIndex)); - var responseBody = createFromResponse(client().performRequest(request)); - assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); - assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(mountedIndex); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 74a8dc7851c89..916a192d53871 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -49,6 +49,7 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlySupportedVersion; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; public class NodeJoinExecutor implements ClusterStateTaskExecutor { @@ -179,7 +180,12 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex Set newNodeEffectiveFeatures = enforceNodeFeatureBarrier(node, effectiveClusterFeatures, features); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster - ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); + ensureIndexCompatibility( + node.getMinIndexVersion(), + node.getMinReadOnlyIndexVersion(), + node.getMaxIndexVersion(), + initialState.getMetadata() + ); nodesBuilder.add(node); compatibilityVersionsMap.put(node.getId(), compatibilityVersions); @@ -394,9 +400,15 @@ private Set calculateEffectiveClusterFeatures(DiscoveryNodes nodes, Map< * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index * compatibility version. * @see IndexVersions#MINIMUM_COMPATIBLE + * @see IndexVersions#MINIMUM_READONLY_COMPATIBLE * @throws IllegalStateException if any index is incompatible with the given version */ - public static void ensureIndexCompatibility(IndexVersion minSupportedVersion, IndexVersion maxSupportedVersion, Metadata metadata) { + public static void ensureIndexCompatibility( + IndexVersion minSupportedVersion, + IndexVersion minReadOnlySupportedVersion, + IndexVersion maxSupportedVersion, + Metadata metadata + ) { // we ensure that all indices in the cluster we join are compatible with us no matter if they are // closed or not we can't read mappings of these indices so we need to reject the join... 
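// (Note: read-only compatible indices, i.e. N-2 archive or searchable snapshot indices carrying a write
// block, are still admitted via the isReadOnlySupportedVersion check in the loop below.)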
for (IndexMetadata idxMetadata : metadata) { @@ -411,14 +423,17 @@ public static void ensureIndexCompatibility(IndexVersion minSupportedVersion, In ); } if (idxMetadata.getCompatibilityVersion().before(minSupportedVersion)) { - throw new IllegalStateException( - "index " - + idxMetadata.getIndex() - + " version not supported: " - + idxMetadata.getCompatibilityVersion().toReleaseVersion() - + " minimum compatible index version is: " - + minSupportedVersion.toReleaseVersion() - ); + boolean isReadOnlySupported = isReadOnlySupportedVersion(idxMetadata, minSupportedVersion, minReadOnlySupportedVersion); + if (isReadOnlySupported == false) { + throw new IllegalStateException( + "index " + + idxMetadata.getIndex() + + " version not supported: " + + idxMetadata.getCompatibilityVersion().toReleaseVersion() + + " minimum compatible index version is: " + + minSupportedVersion.toReleaseVersion() + ); + } } } } @@ -542,7 +557,12 @@ public static Collection> addBuiltInJoin final Collection> validators = new ArrayList<>(); validators.add((node, state) -> { ensureNodesCompatibility(node.getVersion(), state.getNodes()); - ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), state.getMetadata()); + ensureIndexCompatibility( + node.getMinIndexVersion(), + node.getMinReadOnlyIndexVersion(), + node.getMaxIndexVersion(), + state.getMetadata() + ); }); validators.addAll(onJoinValidators); return Collections.unmodifiableCollection(validators); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index 0b8095c24519f..be2563c4732b7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -88,8 +88,12 @@ public IndexMetadataVerifier( * If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index * cannot be updated the method throws an exception. */ - public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { - checkSupportedVersion(indexMetadata, minimumIndexCompatibilityVersion); + public IndexMetadata verifyIndexMetadata( + IndexMetadata indexMetadata, + IndexVersion minimumIndexCompatibilityVersion, + IndexVersion minimumReadOnlyIndexCompatibilityVersion + ) { + checkSupportedVersion(indexMetadata, minimumIndexCompatibilityVersion, minimumReadOnlyIndexCompatibilityVersion); // First convert any shared_cache searchable snapshot indices to only use _tier_preference: data_frozen IndexMetadata newMetadata = convertSharedCacheTierPreference(indexMetadata); @@ -105,26 +109,81 @@ public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersi } /** - * Check that the index version is compatible. Elasticsearch does not support indices created before the - * previous major version. + * Check that the index version is compatible. Elasticsearch supports reading and writing indices created in the current version ("N") + + as well as the previous major version ("N-1"). Elasticsearch only supports reading indices created down to the penultimate version + + ("N-2") and does not support reading nor writing any version below that. 
*/ - private static void checkSupportedVersion(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { - boolean isSupportedVersion = indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion); - if (isSupportedVersion == false) { - throw new IllegalStateException( - "The index " - + indexMetadata.getIndex() - + " has current compatibility version [" - + indexMetadata.getCompatibilityVersion().toReleaseVersion() - + "] but the minimum compatible version is [" - + minimumIndexCompatibilityVersion.toReleaseVersion() - + "]. It should be re-indexed in Elasticsearch " - + (Version.CURRENT.major - 1) - + ".x before upgrading to " - + Build.current().version() - + "." - ); + private static void checkSupportedVersion( + IndexMetadata indexMetadata, + IndexVersion minimumIndexCompatibilityVersion, + IndexVersion minimumReadOnlyIndexCompatibilityVersion + ) { + if (isFullySupportedVersion(indexMetadata, minimumIndexCompatibilityVersion)) { + return; + } + if (isReadOnlySupportedVersion(indexMetadata, minimumIndexCompatibilityVersion, minimumReadOnlyIndexCompatibilityVersion)) { + return; + } + throw new IllegalStateException( + "The index " + + indexMetadata.getIndex() + + " has current compatibility version [" + + indexMetadata.getCompatibilityVersion().toReleaseVersion() + + "] but the minimum compatible version is [" + + minimumIndexCompatibilityVersion.toReleaseVersion() + + "]. It should be re-indexed in Elasticsearch " + + (Version.CURRENT.major - 1) + + ".x before upgrading to " + + Build.current().version() + + "." + ); + } + + private static boolean isFullySupportedVersion(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { + return indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion); + } + + /** + * Returns {@code true} if the index version is compatible in read-only mode. As of today, only searchable snapshots and archive indices + * in version N-2 with a write block are read-only compatible. This method throws an {@link IllegalStateException} if the index is + * either a searchable snapshot or an archive index with a read-only compatible version but is missing the write block. + * + * @param indexMetadata the index metadata + * @param minimumIndexCompatibilityVersion the min. index compatible version for reading and writing indices (used in assertion) + * @param minReadOnlyIndexCompatibilityVersion the min. index compatible version for only reading indices + * + * @return {@code true} if the index version is compatible in read-only mode, {@code false} otherwise. + * @throws IllegalStateException if the index is read-only compatible but has no write block in place. 
+ */ + public static boolean isReadOnlySupportedVersion( + IndexMetadata indexMetadata, + IndexVersion minimumIndexCompatibilityVersion, + IndexVersion minReadOnlyIndexCompatibilityVersion + ) { + boolean isReadOnlySupportedVersion = indexMetadata.getCompatibilityVersion().onOrAfter(minReadOnlyIndexCompatibilityVersion); + assert isFullySupportedVersion(indexMetadata, minimumIndexCompatibilityVersion) == false; + + if (isReadOnlySupportedVersion + && (indexMetadata.isSearchableSnapshot() || indexMetadata.getCreationVersion().isLegacyIndexVersion())) { + boolean isReadOnly = IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexMetadata.getSettings()); + if (isReadOnly == false) { + throw new IllegalStateException( + "The index " + + indexMetadata.getIndex() + + " created in version [" + + indexMetadata.getCreationVersion() + + "] with current compatibility version [" + + indexMetadata.getCompatibilityVersion().toReleaseVersion() + + "] must be marked as read-only using the setting [" + + IndexMetadata.SETTING_BLOCKS_WRITE + + "] set to [true] before upgrading to " + + Build.current().version() + + '.' + ); + } + return true; } + return false; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 0c33878b01229..95d1c37ec41ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -1120,6 +1120,7 @@ private ClusterState openIndices(final Index[] indices, final ClusterState curre final Metadata.Builder metadata = Metadata.builder(currentState.metadata()); final ClusterBlocks.Builder blocks = ClusterBlocks.builder(currentState.blocks()); final IndexVersion minIndexCompatibilityVersion = currentState.getNodes().getMinSupportedIndexVersion(); + final IndexVersion minReadOnlyIndexCompatibilityVersion = currentState.getNodes().getMinReadOnlySupportedIndexVersion(); for (IndexMetadata indexMetadata : indicesToOpen) { final Index index = indexMetadata.getIndex(); @@ -1137,7 +1138,11 @@ private ClusterState openIndices(final Index[] indices, final ClusterState curre // The index might be closed because we couldn't import it due to an old incompatible // version, so we need to verify its compatibility. 
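// (Note: verification now also accepts N-2 read-only compatible indices, hence the extra
// minReadOnlyIndexCompatibilityVersion argument passed below.)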
- newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(newIndexMetadata, minIndexCompatibilityVersion); + newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata( + newIndexMetadata, + minIndexCompatibilityVersion, + minReadOnlyIndexCompatibilityVersion + ); try { indicesService.verifyIndexMetadata(newIndexMetadata, newIndexMetadata); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index a57b8b4d23cdb..bd48572a8bc11 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -70,6 +70,8 @@ import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -87,6 +89,8 @@ import java.util.Map; import java.util.Objects; +import static org.apache.lucene.util.Version.LUCENE_10_0_0; + public class Lucene { public static final String LATEST_CODEC = "Lucene100"; @@ -109,13 +113,6 @@ public class Lucene { private Lucene() {} - /** - * Reads the segments infos, failing if it fails to load - */ - public static SegmentInfos readSegmentInfos(Directory directory) throws IOException { - return SegmentInfos.readLatestCommit(directory); - } - /** * Returns an iterable that allows to iterate over all files in this segments info */ @@ -139,21 +136,45 @@ public static int getNumDocs(SegmentInfos info) { return numDocs; } + /** + * Reads the segments infos, failing if it fails to load + */ + public static SegmentInfos readSegmentInfos(Directory directory) throws IOException { + return SegmentInfos.readLatestCommit(directory, IndexVersions.MINIMUM_READONLY_COMPATIBLE.luceneVersion().major); + } + /** * Reads the segments infos from the given commit, failing if it fails to load */ public static SegmentInfos readSegmentInfos(IndexCommit commit) throws IOException { - // Using commit.getSegmentsFileName() does NOT work here, have to - // manually create the segment filename + // Using commit.getSegmentsFileName() does NOT work here, have to manually create the segment filename String filename = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", commit.getGeneration()); - return SegmentInfos.readCommit(commit.getDirectory(), filename); + return readSegmentInfos(filename, commit.getDirectory()); } /** * Reads the segments infos from the given segments file name, failing if it fails to load */ private static SegmentInfos readSegmentInfos(String segmentsFileName, Directory directory) throws IOException { - return SegmentInfos.readCommit(directory, segmentsFileName); + // TODO Use readCommit(Directory directory, String segmentFileName, int minSupportedMajorVersion) once Lucene 10.1 is available + // and remove the try-catch block for IndexFormatTooOldException + assert IndexVersion.current().luceneVersion().equals(LUCENE_10_0_0) : "remove the try-catch block below"; + try { + return SegmentInfos.readCommit(directory, segmentsFileName); + } catch (IndexFormatTooOldException e) { + try { + // Temporary workaround until Lucene 10.1 is available: try to leverage min. 
read-only compatibility to read the last commit + // and then check if this is the commit we want. This should always work for the case we are interested in (archive and + // searchable snapshots indices in N-2 version) as no newer commit should be ever written. + var segmentInfos = readSegmentInfos(directory); + if (segmentsFileName.equals(segmentInfos.getSegmentsFileName())) { + return segmentInfos; + } + } catch (Exception suppressed) { + e.addSuppressed(suppressed); + } + throw e; + } } /** diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index bf2387453145d..6038a83130db5 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -295,7 +295,11 @@ static Metadata upgradeMetadata(Metadata metadata, IndexMetadataVerifier indexMe boolean changed = false; final Metadata.Builder upgradedMetadata = Metadata.builder(metadata); for (IndexMetadata indexMetadata : metadata) { - IndexMetadata newMetadata = indexMetadataVerifier.verifyIndexMetadata(indexMetadata, IndexVersions.MINIMUM_COMPATIBLE); + IndexMetadata newMetadata = indexMetadataVerifier.verifyIndexMetadata( + indexMetadata, + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE + ); changed |= indexMetadata != newMetadata; upgradedMetadata.put(newMetadata, false); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index a15fef653dabe..9359f6e377ef4 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -125,6 +125,7 @@ public ClusterState execute(ClusterState currentState) { currentState.routingTable() ); IndexVersion minIndexCompatibilityVersion = currentState.nodes().getMinSupportedIndexVersion(); + IndexVersion minReadOnlyIndexCompatibilityVersion = currentState.nodes().getMinReadOnlySupportedIndexVersion(); IndexVersion maxIndexCompatibilityVersion = currentState.nodes().getMaxDataNodeCompatibleIndexVersion(); boolean importNeeded = false; StringBuilder sb = new StringBuilder(); @@ -176,7 +177,11 @@ public ClusterState execute(ClusterState currentState) { try { // The dangled index might be from an older version, we need to make sure it's compatible // with the current version. 
- newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(indexMetadata, minIndexCompatibilityVersion); + newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata( + indexMetadata, + minIndexCompatibilityVersion, + minReadOnlyIndexCompatibilityVersion + ); newIndexMetadata = IndexMetadata.builder(newIndexMetadata) .settings( Settings.builder() diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 1d032c1f400ef..c3ab2ee910805 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -220,7 +220,7 @@ protected DirectoryReader open(IndexCommit commit) throws IOException { assert Transports.assertNotTransportThread("opening index commit of a read-only engine"); DirectoryReader directoryReader = DirectoryReader.open( commit, - org.apache.lucene.util.Version.MIN_SUPPORTED_MAJOR, + IndexVersions.MINIMUM_READONLY_COMPATIBLE.luceneVersion().major, engineConfig.getLeafSorter() ); if (lazilyLoadSoftDeletes) { @@ -575,7 +575,8 @@ public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) { protected DirectoryReader openDirectory(Directory directory) throws IOException { assert Transports.assertNotTransportThread("opening directory reader of a read-only engine"); - final DirectoryReader reader = DirectoryReader.open(directory); + var commit = Lucene.getIndexCommit(Lucene.readSegmentInfos(directory), directory); + final DirectoryReader reader = DirectoryReader.open(commit, IndexVersions.MINIMUM_READONLY_COMPATIBLE.luceneVersion().major, null); if (lazilyLoadSoftDeletes) { return new LazySoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); } else { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 966764d2797c9..2c9741a87c335 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2206,7 +2206,7 @@ private Engine createEngine(EngineConfig config) { * Asserts that the latest Lucene commit contains expected information about sequence numbers or ES version. 
*/ private boolean assertLastestCommitUserData() throws IOException { - final SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(store.directory()); + final SegmentInfos segmentCommitInfos = store.readLastCommittedSegmentsInfo(); final Map userData = segmentCommitInfos.getUserData(); // Ensure sequence numbers are present in commit data assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint"; diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 322064f09cf77..64bbd15198b4b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -819,12 +819,12 @@ public record MetadataSnapshot(Map fileMetadataMap, M public static final MetadataSnapshot EMPTY = new MetadataSnapshot(emptyMap(), emptyMap(), 0L); - static MetadataSnapshot loadFromIndexCommit(IndexCommit commit, Directory directory, Logger logger) throws IOException { + static MetadataSnapshot loadFromIndexCommit(@Nullable IndexCommit commit, Directory directory, Logger logger) throws IOException { final long numDocs; final Map metadataByFile = new HashMap<>(); final Map commitUserData; try { - final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory); + final SegmentInfos segmentCommitInfos = readSegmentsInfo(commit, directory); numDocs = Lucene.getNumDocs(segmentCommitInfos); commitUserData = Map.copyOf(segmentCommitInfos.getUserData()); // we don't know which version was used to write so we take the max version. @@ -1449,7 +1449,6 @@ public void bootstrapNewHistory() throws IOException { * @see SequenceNumbers#MAX_SEQ_NO */ public void bootstrapNewHistory(long localCheckpoint, long maxSeqNo) throws IOException { - assert indexSettings.getIndexMetadata().isSearchableSnapshot() == false; metadataLock.writeLock().lock(); try (IndexWriter writer = newTemporaryAppendingIndexWriter(directory, null)) { final Map map = new HashMap<>(); @@ -1573,7 +1572,6 @@ private IndexWriter newTemporaryEmptyIndexWriter(final Directory dir, final Vers } private IndexWriterConfig newTemporaryIndexWriterConfig() { - assert indexSettings.getIndexMetadata().isSearchableSnapshot() == false; // this config is only used for temporary IndexWriter instances, used to initialize the index or update the commit data, // so we don't want any merges to happen var iwc = indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD).setCommitOnClose(false); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index ddb1e3d384fbe..5aec8e0e3253c 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -1348,6 +1348,7 @@ public ClusterState execute(ClusterState currentState) { final Map shards = new HashMap<>(); final IndexVersion minIndexCompatibilityVersion = currentState.getNodes().getMinSupportedIndexVersion(); + final IndexVersion minReadOnlyIndexCompatibilityVersion = currentState.getNodes().getMinReadOnlySupportedIndexVersion(); final String localNodeId = clusterService.state().nodes().getLocalNodeId(); for (Map.Entry indexEntry : indicesToRestore.entrySet()) { final IndexId index = indexEntry.getValue(); @@ -1360,12 +1361,16 @@ public ClusterState execute(ClusterState 
currentState) { request.indexSettings(), request.ignoreIndexSettings() ); - if (snapshotIndexMetadata.getCompatibilityVersion().before(minIndexCompatibilityVersion)) { + if (snapshotIndexMetadata.getCompatibilityVersion().isLegacyIndexVersion()) { // adapt index metadata so that it can be understood by current version snapshotIndexMetadata = convertLegacyIndex(snapshotIndexMetadata, currentState, indicesService); } try { - snapshotIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(snapshotIndexMetadata, minIndexCompatibilityVersion); + snapshotIndexMetadata = indexMetadataVerifier.verifyIndexMetadata( + snapshotIndexMetadata, + minIndexCompatibilityVersion, + minReadOnlyIndexCompatibilityVersion + ); } catch (Exception ex) { throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 34add82e66557..270315f23a53c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; @@ -58,6 +59,7 @@ import static org.elasticsearch.cluster.metadata.DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect; import static org.elasticsearch.cluster.metadata.DesiredNodesTestCase.randomDesiredNode; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; @@ -89,12 +91,18 @@ public void testPreventJoinClusterWithNewerIndices() { .build(); metaBuilder.put(indexMetadata, false); Metadata metadata = metaBuilder.build(); - NodeJoinExecutor.ensureIndexCompatibility(IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current(), metadata); + NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + metadata + ); expectThrows( IllegalStateException.class, () -> NodeJoinExecutor.ensureIndexCompatibility( IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersionUtils.getPreviousVersion(IndexVersion.current()), metadata ) @@ -113,10 +121,136 @@ public void testPreventJoinClusterWithUnsupportedIndices() { Metadata metadata = metaBuilder.build(); expectThrows( IllegalStateException.class, - () -> NodeJoinExecutor.ensureIndexCompatibility(IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current(), metadata) + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + metadata + ) ); } + public void testJoinClusterWithReadOnlyCompatibleIndices() { + { + var indexMetadata = IndexMetadata.builder("searchable-snapshot") + .settings( + Settings.builder() + .put(INDEX_STORE_TYPE_SETTING.getKey(), 
SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ); + } + { + var indexMetadata = IndexMetadata.builder("searchable-snapshot-no-write-block") + .settings( + Settings.builder() + .put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + expectThrows( + IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ) + ); + } + { + var indexMetadata = IndexMetadata.builder("archive") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(randomFrom(5000099, 6000099))) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ); + } + { + var indexMetadata = IndexMetadata.builder("archive-no-write-block") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(randomFrom(5000099, 6000099))) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + expectThrows( + IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ) + ); + } + { + var indexMetadata = IndexMetadata.builder("legacy") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(randomFrom(5000099, 6000099)))) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + expectThrows( + IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ) + ); + } + { + var indexMetadata = IndexMetadata.builder("read-only-compatible-but-unsupported") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + expectThrows( + IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ) + ); + } + } + public void testPreventJoinClusterWithUnsupportedNodeVersions() { 
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); final Version version = randomCompatibleVersion(random(), Version.CURRENT); @@ -467,7 +601,13 @@ public void testSuccess() { .build(); metaBuilder.put(indexMetadata, false); Metadata metadata = metaBuilder.build(); - NodeJoinExecutor.ensureIndexCompatibility(IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), metadata); + NodeJoinExecutor.ensureIndexCompatibility( + // randomCompatibleVersionSettings() can set a version as low as MINIMUM_READONLY_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + metadata + ); } public static Settings.Builder randomCompatibleVersionSettings() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 6ee86470861b4..417ae89da0a69 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -18,11 +18,13 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import java.util.Collections; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.hamcrest.Matchers.equalTo; public class IndexMetadataVerifierTests extends ESTestCase { @@ -97,7 +99,8 @@ public void testCustomSimilarity() { .put("index.similarity.my_similarity.after_effect", "l") .build() ); - service.verifyIndexMetadata(src, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + // The random IndexMetadata.SETTING_VERSION_CREATED in IndexMetadata can be as low as MINIMUM_READONLY_COMPATIBLE + service.verifyIndexMetadata(src, IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } public void testIncompatibleVersion() { @@ -110,7 +113,7 @@ public void testIncompatibleVersion() { ); String message = expectThrows( IllegalStateException.class, - () -> service.verifyIndexMetadata(metadata, IndexVersions.MINIMUM_COMPATIBLE) + () -> service.verifyIndexMetadata(metadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) ).getMessage(); assertThat( message, @@ -132,7 +135,80 @@ public void testIncompatibleVersion() { indexCreated = IndexVersionUtils.randomVersionBetween(random(), minCompat, IndexVersion.current()); IndexMetadata goodMeta = newIndexMeta("foo", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated).build()); - service.verifyIndexMetadata(goodMeta, IndexVersions.MINIMUM_COMPATIBLE); + service.verifyIndexMetadata(goodMeta, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + } + + public void testReadOnlyVersionCompatibility() { + var service = getIndexMetadataVerifier(); + var indexCreated = IndexVersions.MINIMUM_READONLY_COMPATIBLE; + { + var idxMetadata = newIndexMeta( + "not-searchable-snapshot", + Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()) + .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .build() + ); + String message = expectThrows( + IllegalStateException.class, + () -> service.verifyIndexMetadata(idxMetadata, 
IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ).getMessage(); + assertThat( + message, + equalTo( + "The index [not-searchable-snapshot/" + + idxMetadata.getIndexUUID() + + "] has current compatibility version [" + + indexCreated.toReleaseVersion() + + "] " + + "but the minimum compatible version is [" + + IndexVersions.MINIMUM_COMPATIBLE.toReleaseVersion() + + "]. It should be re-indexed in Elasticsearch " + + (Version.CURRENT.major - 1) + + ".x before upgrading to " + + Build.current().version() + + "." + ) + ); + } + { + var idxMetadata = newIndexMeta( + "not-read-only", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE) + .build() + ); + String message = expectThrows( + IllegalStateException.class, + () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ).getMessage(); + assertThat( + message, + equalTo( + "The index [not-read-only/" + + idxMetadata.getIndexUUID() + + "] created in version [" + + indexCreated + + "] with current compatibility version [" + + indexCreated.toReleaseVersion() + + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " + + Build.current().version() + + "." + ) + ); + } + { + var idxMetadata = newIndexMeta( + "good", + Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE) + .build() + ); + service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + } } private IndexMetadataVerifier getIndexMetadataVerifier() { diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index a161794e35b91..fd0718e5280fe 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -264,7 +264,11 @@ private static class MockIndexMetadataVerifier extends IndexMetadataVerifier { } @Override - public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { + public IndexMetadata verifyIndexMetadata( + IndexMetadata indexMetadata, + IndexVersion minimumIndexCompatibilityVersion, + IndexVersion minimumReadOnlyIndexCompatibilityVersion + ) { return upgrade ? 
IndexMetadata.builder(indexMetadata).build() : indexMetadata; } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 82154848ea039..39c327ddee228 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -252,7 +252,11 @@ public Transport.Connection getConnection(DiscoveryNode node) { ) { // metadata upgrader should do nothing @Override - public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { + public IndexMetadata verifyIndexMetadata( + IndexMetadata indexMetadata, + IndexVersion minimumIndexCompatibilityVersion, + IndexVersion minimumReadOnlyIndexCompatibilityVersion + ) { return indexMetadata; } }; From ee163eb6be25b7d0abf4bbfb54d006d0ddcf1e9f Mon Sep 17 00:00:00 2001 From: John Wagster Date: Thu, 19 Dec 2024 09:28:29 -0600 Subject: [PATCH 50/62] Epoch Millis Rounding Down and Not Up (#118353) Fixed an issue where epoch millis were not being rounded up but instead were rounded down, causing gt behavior to fail when off by one millisecond. --- docs/changelog/118353.yaml | 5 + .../elasticsearch/common/time/EpochTime.java | 107 +++++-- .../common/time/JavaDateFormatter.java | 36 ++- .../common/time/DateFormattersTests.java | 15 + .../common/time/EpochTimeTests.java | 291 ++++++++++++++++++ 5 files changed, 429 insertions(+), 25 deletions(-) create mode 100644 docs/changelog/118353.yaml create mode 100644 server/src/test/java/org/elasticsearch/common/time/EpochTimeTests.java diff --git a/docs/changelog/118353.yaml b/docs/changelog/118353.yaml new file mode 100644 index 0000000000000..7be62a4a60c7e --- /dev/null +++ b/docs/changelog/118353.yaml @@ -0,0 +1,5 @@ +pr: 118353 +summary: Epoch Millis Rounding Down and Not Up 2 +area: Infra/Core +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index c53c9d0c03df3..51b28fd97e245 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -35,6 +35,25 @@ class EpochTime { private static final ValueRange POSITIVE_LONG_INTEGER_RANGE = ValueRange.of(0, Long.MAX_VALUE); + // TemporalField is only present in the presence of a rounded timestamp + private static final long ROUNDED_SIGN_PLACEHOLDER = -2; + private static final EpochField ROUNDED_SIGN_FIELD = new EpochField( + ChronoUnit.FOREVER, + ChronoUnit.FOREVER, + ValueRange.of(ROUNDED_SIGN_PLACEHOLDER, ROUNDED_SIGN_PLACEHOLDER) + ) { + // FIXME: what should this be? + @Override + public boolean isSupportedBy(TemporalAccessor temporal) { + return temporal.isSupported(ChronoField.INSTANT_SECONDS) && temporal.getLong(ChronoField.INSTANT_SECONDS) < 0; + } + + @Override + public long getFrom(TemporalAccessor temporal) { + return ROUNDED_SIGN_PLACEHOLDER; + } + }; + // TemporalField is only present in the presence of a negative (potentially fractional) timestamp.
private static final long NEGATIVE_SIGN_PLACEHOLDER = -1; private static final EpochField NEGATIVE_SIGN_FIELD = new EpochField( @@ -161,6 +180,10 @@ public TemporalAccessor resolve( Long nanosOfMilli = fieldValues.remove(NANOS_OF_MILLI); long secondsAndMillis = fieldValues.remove(this); + // this flag indicates whether we were asked to round up and we defaulted to 999_999 nanos or nanos were given by the users + // specifically we do not want to confuse defaulted 999_999 nanos with user supplied 999_999 nanos + boolean roundUp = fieldValues.remove(ROUNDED_SIGN_FIELD) != null; + long seconds; long nanos; if (isNegative != null) { @@ -169,10 +192,18 @@ public TemporalAccessor resolve( nanos = secondsAndMillis % 1000 * 1_000_000; // `secondsAndMillis < 0` implies negative timestamp; so `nanos < 0` if (nanosOfMilli != null) { - // aggregate fractional part of the input; subtract b/c `nanos < 0` - nanos -= nanosOfMilli; + if (roundUp) { + // these are not the nanos you think they are; these are "round up nanos" not the fractional part of the input + // this is the case where we defaulted the value to 999_999 and the intention for rounding is that the value + // moves closer to positive infinity + nanos += nanosOfMilli; + } else { + // aggregate fractional part of the input; subtract b/c `nanos < 0` + // this is the case where the user has supplied a nanos value and we'll want to shift toward negative infinity + nanos -= nanosOfMilli; + } } - if (nanos != 0) { + if (nanos < 0) { // nanos must be positive. B/c the timestamp is represented by the // (seconds, nanos) tuple, seconds moves 1s toward negative-infinity // and nanos moves 1s toward positive-infinity @@ -235,38 +266,70 @@ public long getFrom(TemporalAccessor temporal) { .appendLiteral('.') .toFormatter(Locale.ROOT); - // this supports milliseconds - public static final DateTimeFormatter MILLISECONDS_FORMATTER1 = new DateTimeFormatterBuilder().optionalStart() + static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter( + "epoch_second", + new JavaTimeDateTimePrinter(SECONDS_FORMATTER1), + JavaTimeDateTimeParser.createRoundUpParserGenerator(builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L)), + new JavaTimeDateTimeParser(SECONDS_FORMATTER1), + new JavaTimeDateTimeParser(SECONDS_FORMATTER2) + ); + + public static final DateTimeFormatter MILLISECONDS_FORMATTER_BASE = new DateTimeFormatterBuilder().optionalStart() .appendText(NEGATIVE_SIGN_FIELD, Map.of(-1L, "-")) // field is only created in the presence of a '-' char. .optionalEnd() .appendValue(UNSIGNED_MILLIS, 1, 19, SignStyle.NOT_NEGATIVE) + .toFormatter(Locale.ROOT); + + // FIXME: clean these up and append one to the other + // this supports milliseconds + public static final DateTimeFormatter MILLISECONDS_FORMATTER = new DateTimeFormatterBuilder().append(MILLISECONDS_FORMATTER_BASE) .optionalStart() .appendFraction(NANOS_OF_MILLI, 0, 6, true) .optionalEnd() .toFormatter(Locale.ROOT); - // this supports milliseconds ending in dot - private static final DateTimeFormatter MILLISECONDS_FORMATTER2 = new DateTimeFormatterBuilder().optionalStart() .appendText(NEGATIVE_SIGN_FIELD, Map.of(-1L, "-")) // field is only created in the presence of a '-' char.
- .optionalEnd() - .appendValue(UNSIGNED_MILLIS, 1, 19, SignStyle.NOT_NEGATIVE) - .appendLiteral('.') + // this supports milliseconds + public static final DateTimeFormatter MILLISECONDS_PARSER_W_NANOS = new DateTimeFormatterBuilder().append(MILLISECONDS_FORMATTER_BASE) + .appendFraction(NANOS_OF_MILLI, 0, 6, true) .toFormatter(Locale.ROOT); - static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter( - "epoch_second", - new JavaTimeDateTimePrinter(SECONDS_FORMATTER1), - JavaTimeDateTimeParser.createRoundUpParserGenerator(builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L)), - new JavaTimeDateTimeParser(SECONDS_FORMATTER1), - new JavaTimeDateTimeParser(SECONDS_FORMATTER2) - ); + // we need an additional parser to detect the difference between user provided nanos and defaulted ones because of the necessity + // to parse the two differently in the round up case + public static final DateTimeFormatter MILLISECONDS_PARSER_WO_NANOS = new DateTimeFormatterBuilder().append(MILLISECONDS_FORMATTER_BASE) + .toFormatter(Locale.ROOT); + // we need an additional parser to detect the difference between user provided nanos and defaulted ones because of the necessity + // to parse the two differently in the round up case + public static final DateTimeFormatter MILLISECONDS_PARSER_WO_NANOS_ROUNDING = new DateTimeFormatterBuilder().append( + MILLISECONDS_FORMATTER_BASE + ).parseDefaulting(EpochTime.ROUNDED_SIGN_FIELD, -2L).parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L).toFormatter(Locale.ROOT); + + // this supports milliseconds ending in dot + private static final DateTimeFormatter MILLISECONDS_PARSER_ENDING_IN_PERIOD = new DateTimeFormatterBuilder().append( + MILLISECONDS_FORMATTER_BASE + ).appendLiteral('.').toFormatter(Locale.ROOT); + + /* + We separately handle the rounded and non-rounded use cases here with different parsers. This is because of how we store and + handle negative milliseconds since the epoch. If a user supplies nanoseconds as part of a negative millisecond since epoch value + then we need to round toward negative infinity. However, in the case where nanos are not supplied, and we are requested to + round up, we will default the value of nanos to 999_999 and need to delineate that this rounding was intended to push the value + toward positive infinity, not negative infinity. Differentiating these two cases during parsing requires the ROUNDED_SIGN_FIELD + flag called out above. In addition to this flag we need to know that we are in the "rounding up" state, so any time we are + asked to round up we force setting the ROUNDED_SIGN_FIELD flag; we can then detect that when parsing and + storing the time information and make the correct decision to round toward positive infinity.
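+
+ As a concrete example (mirroring the cases in EpochTimeTests below): with round-up enabled, parsing "-1" defaults
+ NANOS_OF_MILLI to 999_999 and sets ROUNDED_SIGN_FIELD, so the defaulted nanos are added and the result is -1ms + 999_999ns,
+ i.e. rounded toward positive infinity. Parsing "-0.999999" supplies the nanos explicitly, so they are subtracted and the
+ result is -1ms + 1ns, i.e. rounded toward negative infinity.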
+ */ static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter( "epoch_millis", - new JavaTimeDateTimePrinter(MILLISECONDS_FORMATTER1), - JavaTimeDateTimeParser.createRoundUpParserGenerator(builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L)), - new JavaTimeDateTimeParser(MILLISECONDS_FORMATTER1), - new JavaTimeDateTimeParser(MILLISECONDS_FORMATTER2) + new JavaTimeDateTimePrinter(MILLISECONDS_FORMATTER), + new JavaTimeDateTimeParser[] { + new JavaTimeDateTimeParser(MILLISECONDS_PARSER_WO_NANOS_ROUNDING), + new JavaTimeDateTimeParser(MILLISECONDS_PARSER_W_NANOS), + new JavaTimeDateTimeParser(MILLISECONDS_PARSER_ENDING_IN_PERIOD) }, + new JavaTimeDateTimeParser[] { + new JavaTimeDateTimeParser(MILLISECONDS_PARSER_WO_NANOS), + new JavaTimeDateTimeParser(MILLISECONDS_PARSER_W_NANOS), + new JavaTimeDateTimeParser(MILLISECONDS_PARSER_ENDING_IN_PERIOD) } ); private abstract static class EpochField implements TemporalField { diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index ae4845695734f..434db8254e06d 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -165,11 +165,40 @@ static DateFormatter combined(String input, List formatters) { input, printer, roundUpParsers.stream().flatMap(Arrays::stream).toArray(DateTimeParser[]::new), - parsers.stream().flatMap(Arrays::stream).toArray(DateTimeParser[]::new) + parsers.stream().flatMap(Arrays::stream).toArray(DateTimeParser[]::new), + false ); } - private JavaDateFormatter(String format, DateTimePrinter printer, DateTimeParser[] roundupParsers, DateTimeParser[] parsers) { + JavaDateFormatter(String format, DateTimePrinter printer, DateTimeParser[] roundupParsers, DateTimeParser[] parsers) { + this( + format, + printer, + Arrays.copyOf(roundupParsers, roundupParsers.length, DateTimeParser[].class), + Arrays.copyOf(parsers, parsers.length, DateTimeParser[].class), + true + ); + } + + private JavaDateFormatter( + String format, + DateTimePrinter printer, + DateTimeParser[] roundupParsers, + DateTimeParser[] parsers, + boolean doValidate + ) { + if (doValidate) { + if (format.contains("||")) { + throw new IllegalArgumentException("This class cannot handle multiple format specifiers"); + } + if (printer == null) { + throw new IllegalArgumentException("printer may not be null"); + } + if (parsers.length == 0) { + throw new IllegalArgumentException("parsers need to be specified"); + } + verifyPrinterParsers(printer, parsers); + } this.format = format; this.printer = printer; this.roundupParsers = roundupParsers; @@ -247,7 +276,8 @@ private JavaDateFormatter mapParsers(UnaryOperator printerMappi format, printerMapping.apply(printer), mapParsers(parserMapping, this.roundupParsers), - mapParsers(parserMapping, this.parsers) + mapParsers(parserMapping, this.parsers), + false ); } diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 70a1735c7b1c5..463d0d4514bac 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -271,6 +271,21 @@ public void testEpochMillisParser() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> 
formatter.parse("12345.0.")); assertThat(e.getMessage(), is("failed to parse date field [12345.0.] with format [epoch_millis]")); } + { + Instant instant = Instant.from(formatter.parse("-86400000")); + assertThat(instant.getEpochSecond(), is(-86400L)); + assertThat(instant.getNano(), is(0)); + assertThat(formatter.format(instant), is("-86400000")); + assertThat(Instant.from(formatter.parse(formatter.format(instant))), is(instant)); + } + { + Instant instant = Instant.from(formatter.parse("-86400000.999999")); + assertThat(instant.getEpochSecond(), is(-86401L)); + assertThat(instant.getNano(), is(999000001)); + assertThat(formatter.format(instant), is("-86400000.999999")); + assertThat(Instant.from(formatter.parse(formatter.format(instant))), is(instant)); + } + } /** diff --git a/server/src/test/java/org/elasticsearch/common/time/EpochTimeTests.java b/server/src/test/java/org/elasticsearch/common/time/EpochTimeTests.java new file mode 100644 index 0000000000000..59bf3958a3b26 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/time/EpochTimeTests.java @@ -0,0 +1,291 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.test.ESTestCase; + +import java.time.Instant; +import java.time.ZoneId; +import java.util.function.LongSupplier; + +import static org.elasticsearch.common.time.EpochTime.MILLIS_FORMATTER; +import static org.hamcrest.Matchers.is; + +public class EpochTimeTests extends ESTestCase { + + public void testNegativeEpochMillis() { + DateFormatter formatter = MILLIS_FORMATTER; + + // validate that negative epoch millis around rounded appropriately by the parser + LongSupplier supplier = () -> 0L; + { + Instant instant = formatter.toDateMathParser().parse("0", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("0", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("1", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.001999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-1", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("1", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.001Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-1", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999Z", instant.toString()); + 
} + + { + Instant instant = formatter.toDateMathParser().parse("0.999999", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.999999", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999000001Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("0.999999", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.999999", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999000001Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("6250000430768", supplier, true, ZoneId.of("UTC")); + assertEquals("2168-01-20T23:13:50.768999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-6250000430768", supplier, true, ZoneId.of("UTC")); + assertEquals("1771-12-12T00:46:09.232999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("6250000430768", supplier, false, ZoneId.of("UTC")); + assertEquals("2168-01-20T23:13:50.768Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-6250000430768", supplier, false, ZoneId.of("UTC")); + assertEquals("1771-12-12T00:46:09.232Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("0.123450", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000123450Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.123450", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999876550Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("0.123450", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000123450Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.123450", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999876550Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("0.123456", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000123456Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.123456", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999876544Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("0.123456", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000123456Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.123456", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999876544Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("86400000", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-02T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-86400000", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("86400000", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-02T00:00:00Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-86400000", supplier, false, 
ZoneId.of("UTC")); + assertEquals("1969-12-31T00:00:00Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("86400000.999999", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-02T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-86400000.999999", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-30T23:59:59.999000001Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("86400000.999999", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-02T00:00:00.000999999Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-86400000.999999", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-30T23:59:59.999000001Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("200.89", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.200890Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-200.89", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.799110Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("200.89", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.200890Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-200.89", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.799110Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("200.", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.200Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-200.", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.800Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("200.", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.200Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-200.", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.800Z", instant.toString()); + } + + { + Instant instant = formatter.toDateMathParser().parse("0.200", supplier, true, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000200Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.200", supplier, true, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999800Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("0.200", supplier, false, ZoneId.of("UTC")); + assertEquals("1970-01-01T00:00:00.000200Z", instant.toString()); + } + { + Instant instant = formatter.toDateMathParser().parse("-0.200", supplier, false, ZoneId.of("UTC")); + assertEquals("1969-12-31T23:59:59.999800Z", instant.toString()); + } + + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse(".200", supplier, true, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [.200] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("-.200", supplier, true, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [-.200] with format [epoch_millis]")); + } + { + 
ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse(".200", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [.200] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("-.200", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [-.200] with format [epoch_millis]")); + } + + // tilde was included in the parsers at one point for delineating negative and positive infinity rounding and we want to + // ensure it doesn't show up unexpectedly in the parser with its original "~" value + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("~-0.200", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [~-0.200] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("~0.200", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [~0.200] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("~-1", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [~-1] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("~1", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [~1] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("~-1.", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [~-1.] with format [epoch_millis]")); + } + { + ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> formatter.toDateMathParser().parse("~1.", supplier, false, ZoneId.of("UTC")) + ); + assertThat(e.getMessage().split(":")[0], is("failed to parse date field [~1.]
with format [epoch_millis]")); + } + } + +} From e504cecf4f490eec1b7668320c941c06d966deb3 Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Thu, 19 Dec 2024 16:31:17 +0100 Subject: [PATCH 51/62] Fix release tests for SemanticMatchTestCase (#119022) * Fix release tests * Mute test that was already muted on 8.x --------- Co-authored-by: Elastic Machine --- muted-tests.yml | 3 +++ .../xpack/esql/qa/rest/SemanticMatchTestCase.java | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f2294939b7aab..8a6acfeda203d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -290,6 +290,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118914 - class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT issue: https://github.com/elastic/elasticsearch/issues/118970 +- class: org.elasticsearch.aggregations.bucket.SearchCancellationIT + method: testCancellationDuringTimeSeriesAggregation + issue: https://github.com/elastic/elasticsearch/issues/118992 - class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests method: testInvalidToken issue: https://github.com/elastic/elasticsearch/issues/119019 diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java index aafa57e764ae7..b9314645cf2c9 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java @@ -22,6 +22,8 @@ public abstract class SemanticMatchTestCase extends ESRestTestCase { public void testWithMultipleInferenceIds() throws IOException { + assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); + String query = """ from test-semantic1,test-semantic2 | where match(semantic_text_field, "something") @@ -34,6 +36,8 @@ public void testWithMultipleInferenceIds() throws IOException { } public void testWithInferenceNotConfigured() { + assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); + String query = """ from test-semantic3 | where match(semantic_text_field, "something") @@ -46,8 +50,6 @@ public void testWithInferenceNotConfigured() { @Before public void setUpIndices() throws IOException { - assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); - var settings = Settings.builder().build(); String mapping1 = """ @@ -83,7 +85,6 @@ public void setUpIndices() throws IOException { @Before public void setUpTextEmbeddingInferenceEndpoint() throws IOException { - assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); Request request = new Request("PUT", "_inference/text_embedding/test_dense_inference"); request.setJsonEntity(""" { @@ -101,7 +102,6 @@ public void setUpTextEmbeddingInferenceEndpoint() throws IOException { @After public void wipeData() throws IOException { - assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); adminClient().performRequest(new Request("DELETE", "*")); try { From 9af140211d4f191d68eedcfd7fb3b8fe2e5700b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: 
Thu, 19 Dec 2024 16:40:27 +0100 Subject: [PATCH 52/62] Restore WildcardFieldMapperTests#testBWCIndexVersion (#119093) This test was removed in #112570, but with the coming read-only support for v7 we need it back. --- .../mapper/WildcardFieldMapperTests.java | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index a1a01ebdcc590..0b31e96ece84a 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -106,6 +107,7 @@ static SearchExecutionContext createMockSearchExecutionContext(boolean allowExpe static final int MAX_FIELD_LENGTH = 30; static WildcardFieldMapper wildcardFieldType; + static WildcardFieldMapper wildcardFieldType79; static KeywordFieldMapper keywordFieldType; private DirectoryReader rewriteReader; private BaseDirectoryWrapper rewriteDir; @@ -127,6 +129,9 @@ public void setUp() throws Exception { builder.ignoreAbove(MAX_FIELD_LENGTH); wildcardFieldType = builder.build(MapperBuilderContext.root(false, false)); + Builder builder79 = new WildcardFieldMapper.Builder(WILDCARD_FIELD_NAME, IndexVersions.V_7_9_0); + wildcardFieldType79 = builder79.build(MapperBuilderContext.root(false, false)); + org.elasticsearch.index.mapper.KeywordFieldMapper.Builder kwBuilder = new KeywordFieldMapper.Builder( KEYWORD_FIELD_NAME, IndexVersion.current() @@ -207,6 +212,37 @@ public void testIgnoreAbove() throws IOException { assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } + public void testBWCIndexVersion() throws IOException { + // Create old format index using wildcard ngram analyzer used in 7.9 launch + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(WildcardFieldMapper.WILDCARD_ANALYZER_7_9); + iwc.setMergePolicy(newTieredMergePolicy(random())); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); + + Document doc = new Document(); + LuceneDocument parseDoc = new LuceneDocument(); + addFields(parseDoc, doc, "a b"); + indexDoc(parseDoc, doc, iw); + + iw.forceMerge(1); + DirectoryReader reader = iw.getReader(); + IndexSearcher searcher = newSearcher(reader); + iw.close(); + + // Unnatural circumstance - testing we fail if we were to use the new analyzer on old index + Query oldWildcardFieldQuery = wildcardFieldType.fieldType().wildcardQuery("a b", null, null); + TopDocs oldWildcardFieldTopDocs = searcher.search(oldWildcardFieldQuery, 10, Sort.INDEXORDER); + assertThat(oldWildcardFieldTopDocs.totalHits.value(), equalTo(0L)); + + // Natural circumstance test we revert to the old analyzer for old indices + Query wildcardFieldQuery = wildcardFieldType79.fieldType().wildcardQuery("a b", null, null); + TopDocs wildcardFieldTopDocs = searcher.search(wildcardFieldQuery, 10, Sort.INDEXORDER); + assertThat(wildcardFieldTopDocs.totalHits.value(),
equalTo(1L)); + + reader.close(); + dir.close(); + } + // Test long query strings don't cause exceptions public void testTooBigQueryField() throws IOException { Directory dir = newDirectory(); From 6ee641bdfdfb589c9c0b55cc1478dfe959fb0d10 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:44:53 +0100 Subject: [PATCH 53/62] ESQL - Update WHERE command docs with MATCH and full text functions examples (#118987) --- docs/reference/esql/esql-limitations.asciidoc | 11 +++++++++- .../esql/functions/description/match.asciidoc | 2 +- .../functions/kibana/definition/match.json | 2 +- .../esql/functions/kibana/docs/match.md | 9 +++++++- .../esql/functions/search-functions.asciidoc | 4 +++- .../esql/processing-commands/where.asciidoc | 22 ++++++++++++++++++- .../expression/function/fulltext/Match.java | 11 ++++++++-- 7 files changed, 53 insertions(+), 8 deletions(-) diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index c2849e4889f98..0710acf3749d2 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -112,7 +112,7 @@ it is necessary to use the search function, like <>, in a <> source command, or close enough to it. Otherwise, the query will fail with a validation error. Another limitation is that any <> command containing a full-text search function -cannot also use disjunctions (`OR`). +cannot also use disjunctions (`OR`) unless all functions used in the OR clauses are full-text functions themselves. For example, this query is valid: @@ -139,6 +139,15 @@ FROM books | WHERE MATCH(author, "Faulkner") OR author LIKE "Hemingway" ---- +However, this query will succeed because it uses full text functions on both `OR` clauses: + +[source,esql] +---- +FROM books +| WHERE MATCH(author, "Faulkner") OR QSTR("author: Hemingway") +---- + + Note that, because of <>, any queries on `text` fields that do not explicitly use the full-text functions, <> or <>, will behave as if the fields are actually `keyword` fields: diff --git a/docs/reference/esql/functions/description/match.asciidoc b/docs/reference/esql/functions/description/match.asciidoc index 25f0571878d47..931fd5eb2f94a 100644 --- a/docs/reference/esql/functions/description/match.asciidoc +++ b/docs/reference/esql/functions/description/match.asciidoc @@ -2,4 +2,4 @@ *Description* -Performs a <> on the specified field. Returns true if the provided query matches the row. +Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row. diff --git a/docs/reference/esql/functions/kibana/definition/match.json b/docs/reference/esql/functions/kibana/definition/match.json index 7f2a8239cc0d0..d61534da81a6d 100644 --- a/docs/reference/esql/functions/kibana/definition/match.json +++ b/docs/reference/esql/functions/kibana/definition/match.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "match", - "description" : "Performs a <> on the specified field.
Returns true if the provided query matches the row.", + "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/match.md b/docs/reference/esql/functions/kibana/docs/match.md index adf6de91c90f1..72258a1682936 100644 --- a/docs/reference/esql/functions/kibana/docs/match.md +++ b/docs/reference/esql/functions/kibana/docs/match.md @@ -3,7 +3,14 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MATCH -Performs a <> on the specified field. Returns true if the provided query matches the row. +Use `MATCH` to perform a <> on the specified field. +Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. + +Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. + +For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. + +`MATCH` returns true if the provided query matches the row. ``` FROM books diff --git a/docs/reference/esql/functions/search-functions.asciidoc b/docs/reference/esql/functions/search-functions.asciidoc index 238813c382c8c..bfdd288542782 100644 --- a/docs/reference/esql/functions/search-functions.asciidoc +++ b/docs/reference/esql/functions/search-functions.asciidoc @@ -6,11 +6,13 @@ ++++ Full text functions are used to search for text in fields. -<> is used to analyze the query before it is searched. +<> is used to analyze the query before it is searched. Full text functions can be used to match <>. A multivalued field that contains a value that matches a full text query is considered to match the query. +Full text functions are significantly more performant for text search use cases on large data sets than using pattern matching or regular expressions with `LIKE` or `RLIKE`. + See <> for information on the limitations of full text search. {esql} supports these full-text search functions: diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index 1d6fc1e90d595..68336d5358eaf 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -7,7 +7,7 @@ the input table for which the provided condition evaluates to `true`. [TIP] ==== -In case of value exclusions, fields with `null` values will be excluded from search results. +In case of value exclusions, fields with `null` values will be excluded from search results. In this context a `null` means either there is an explicit `null` value in the document or there is no value at all. For example: `WHERE field != "value"` will be interpreted as `WHERE field != "value" AND field IS NOT NULL`. ==== @@ -58,6 +58,26 @@ For a complete list of all functions, refer to <>. include::../functions/predicates.asciidoc[tag=body] +For matching text, you can use <> like `MATCH`. + +Use <> to perform a <> on a specified field. + +Match can be used on text fields, as well as other field types like boolean, dates, and numeric types.
+
+[source.merge.styled,esql] +---- +include::{esql-specs}/match-function.csv-spec[tag=match-with-field] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/match-function.csv-spec[tag=match-with-field-result] +|=== + +[TIP] +==== +You can also use the shorthand <> `:` instead of `MATCH`. +==== + include::../functions/like.asciidoc[tag=body] include::../functions/rlike.asciidoc[tag=body] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java index e695a94198dab..fb9f8cff1b8b0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java @@ -98,8 +98,15 @@ public class Match extends FullTextFunction implements Validatable { @FunctionInfo( returnType = "boolean", preview = true, - description = "Performs a <> on the specified field. " - + "Returns true if the provided query matches the row.", + description = """ + Use `MATCH` to perform a <> on the specified field. + Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. + + Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. + + For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. + + `MATCH` returns true if the provided query matches the row.""", examples = { @Example(file = "match-function", tag = "match-with-field") } ) public Match( From 338d2bdcd8901618d7b96bda4a9059ab682207b9 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 19 Dec 2024 16:51:08 +0100 Subject: [PATCH 54/62] Removed forgotten test mute (#119092) The test failure #118728 was fixed by #118944 but the test wasn't unmuted. --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 8a6acfeda203d..34b8f1bd18d52 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,9 +279,6 @@ tests: - class: org.elasticsearch.xpack.security.QueryableReservedRolesIT method: testDeletingAndCreatingSecurityIndexTriggersSynchronization issue: https://github.com/elastic/elasticsearch/issues/118806 -- class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests - method: testPruneSome - issue: https://github.com/elastic/elasticsearch/issues/118728 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/indices/shard-stores/line_150} issue: https://github.com/elastic/elasticsearch/issues/118896 From 77eb19172418d3b336180ae594266fb25c724b49 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 19 Dec 2024 17:39:15 +0100 Subject: [PATCH 55/62] Remove min_read_only_index_version from XContent node (#119083) We prefer to remove this information from the API since it is not useful externally, impacts the search shard API, and may be removed later (which would be a breaking change).
Follow-up #118744 --- .../java/org/elasticsearch/cluster/node/DiscoveryNode.java | 1 - .../admin/cluster/reroute/ClusterRerouteResponseTests.java | 2 -- .../java/org/elasticsearch/cluster/ClusterStateTests.java | 6 ------ .../org/elasticsearch/cluster/node/DiscoveryNodeTests.java | 2 -- .../collector/cluster/ClusterStatsMonitoringDocTests.java | 2 -- 5 files changed, 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 7c757e7657853..46a83495ecfb9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -595,7 +595,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.field("version", versionInfo.buildVersion().toString()); builder.field("min_index_version", versionInfo.minIndexVersion()); - builder.field("min_read_only_index_version", versionInfo.minReadOnlyIndexVersion()); builder.field("max_index_version", versionInfo.maxIndexVersion()); builder.endObject(); return builder; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 69cff0fc45ac3..b59cc13a20ff2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -127,7 +127,6 @@ public void testToXContentWithDeprecatedClusterState() { ], "version": "%s", "min_index_version": %s, - "min_read_only_index_version": %s, "max_index_version": %s } }, @@ -219,7 +218,6 @@ public void testToXContentWithDeprecatedClusterState() { clusterState.getNodes().get("node0").getEphemeralId(), Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, - IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), IndexVersion.current(), IndexVersion.current() diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 5f4426b02ce1a..668aea70c23f2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -213,7 +213,6 @@ public void testToXContent() throws IOException { ], "version": "%s", "min_index_version":%s, - "min_read_only_index_version":%s, "max_index_version":%s } }, @@ -390,7 +389,6 @@ public void testToXContent() throws IOException { ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, - IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), @@ -490,7 +488,6 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ], "version" : "%s", "min_index_version" : %s, - "min_read_only_index_version" : %s, "max_index_version" : %s } }, @@ -666,7 +663,6 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, - IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), @@ -766,7 +762,6 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ], "version" : "%s", 
"min_index_version" : %s, - "min_read_only_index_version" : %s, "max_index_version" : %s } }, @@ -948,7 +943,6 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, - IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java index fa7633f0eaf75..a91cef576df33 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java @@ -223,7 +223,6 @@ public void testDiscoveryNodeToXContent() { ], "version" : "%s", "min_index_version" : %s, - "min_read_only_index_version" : %s, "max_index_version" : %s } }""", @@ -231,7 +230,6 @@ public void testDiscoveryNodeToXContent() { withExternalId ? "test-external-id" : "test-name", Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, - IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 35da4abec223a..f4d50df4ff613 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -462,7 +462,6 @@ public void testToXContent() throws IOException { pluginEsBuildVersion, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, - IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), apmIndicesExist }; final String expectedJson = """ @@ -818,7 +817,6 @@ public void testToXContent() throws IOException { ], "version": "%s", "min_index_version":%s, - "min_read_only_index_version":%s, "max_index_version":%s } }, From 97e6bb6ce3979e85b6258cda64cf667f1188473a Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Thu, 19 Dec 2024 16:55:54 +0000 Subject: [PATCH 56/62] Add cluster setting to enable failure store (#118662) This setting enables or disables the failure store for data streams based on matching the data stream name against a list of patterns. It acts as a default, and is overridden if the failure store is explicitly enabled or disabled either in a component template or using the data stream options API. (See the PR for explanations of some of the changes here.) 
--- .../datastreams/DataStreamOptionsIT.java | 75 +++++- .../action/TransportGetDataStreamsAction.java | 22 +- .../action/GetDataStreamsResponseTests.java | 8 +- .../TransportGetDataStreamsActionTests.java | 105 +++++++++ ...stream.yml => 200_require_data_stream.yml} | 0 ...ore.yml => 210_rollover_failure_store.yml} | 0 .../220_failure_store_cluster_setting.yml | 222 ++++++++++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../indices/create/CreateIndexRequest.java | 8 +- .../action/bulk/BulkOperation.java | 16 +- .../bulk/TransportAbstractBulkAction.java | 8 +- .../action/bulk/TransportBulkAction.java | 51 ++-- .../datastreams/GetDataStreamAction.java | 40 +++- .../action/index/IndexRequest.java | 8 +- .../cluster/metadata/DataStream.java | 68 +++++- .../DataStreamFailureStoreSettings.java | 72 ++++++ .../cluster/metadata/DataStreamOptions.java | 10 - .../metadata/IndexNameExpressionResolver.java | 12 +- .../cluster/metadata/Metadata.java | 3 +- .../MetadataCreateDataStreamService.java | 13 +- .../common/settings/ClusterSettings.java | 13 +- .../elasticsearch/node/NodeConstruction.java | 7 +- .../RestClusterUpdateSettingsAction.java | 9 + .../MetadataRolloverServiceTests.java | 2 +- .../action/bulk/BulkOperationTests.java | 110 ++++++++- .../bulk/TransportBulkActionIngestTests.java | 5 +- .../action/bulk/TransportBulkActionTests.java | 159 ++++++++----- .../bulk/TransportBulkActionTookTests.java | 5 +- .../datastreams/GetDataStreamActionTests.java | 1 + .../DataStreamFailureStoreSettingsTests.java | 87 +++++++ .../cluster/metadata/DataStreamTests.java | 155 ++++++++++++ .../snapshots/SnapshotResiliencyTests.java | 4 +- .../metadata/DataStreamTestHelper.java | 30 ++- .../xpack/core/DataStreamRestIT.java | 19 +- .../DataStreamUsageTransportAction.java | 20 +- .../DataStreamFeatureSetUsage.java | 12 +- .../DataStreamFeatureSetUsageTests.java | 1 + 37 files changed, 1213 insertions(+), 168 deletions(-) rename modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/{190_require_data_stream.yml => 200_require_data_stream.yml} (100%) rename modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/{200_rollover_failure_store.yml => 210_rollover_failure_store.yml} (100%) create mode 100644 modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettings.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettingsTests.java diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java index de6b7a682324e..482867d072fc2 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java @@ -12,6 +12,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestStatus; import org.junit.Before; import java.io.IOException; @@ -122,13 +125,25 @@ public void testExplicitlyResetDataStreamOptions() throws IOException { 
assertOK(client().performRequest(otherRequest)); } - public void testEnableDisableFailureStore() throws IOException { + public void testBehaviorWithEachFailureStoreOptionAndClusterSetting() throws IOException { { + // Default data stream options assertAcknowledged(client().performRequest(new Request("DELETE", "/_data_stream/" + DATA_STREAM_NAME + "/_options"))); - assertFailureStore(false, 1); + setDataStreamFailureStoreClusterSetting(DATA_STREAM_NAME); assertDataStreamOptions(null); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); + setDataStreamFailureStoreClusterSetting("does-not-match-failure-data-stream"); + assertDataStreamOptions(null); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); + setDataStreamFailureStoreClusterSetting(null); // should get same behaviour as when we set it to something non-matching + assertDataStreamOptions(null); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); } { + // Data stream options with failure store enabled Request enableRequest = new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME + "/_options"); enableRequest.setJsonEntity(""" { @@ -137,11 +152,21 @@ public void testEnableDisableFailureStore() throws IOException { } }"""); assertAcknowledged(client().performRequest(enableRequest)); - assertFailureStore(true, 1); + setDataStreamFailureStoreClusterSetting(DATA_STREAM_NAME); + assertDataStreamOptions(true); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); + setDataStreamFailureStoreClusterSetting("does-not-match-failure-data-stream"); // should have no effect as enabled in options assertDataStreamOptions(true); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); + setDataStreamFailureStoreClusterSetting(null); // same as previous + assertDataStreamOptions(true); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); } - { + // Data stream options with failure store disabled Request disableRequest = new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME + "/_options"); disableRequest.setJsonEntity(""" { @@ -150,13 +175,23 @@ public void testEnableDisableFailureStore() throws IOException { } }"""); assertAcknowledged(client().performRequest(disableRequest)); - assertFailureStore(false, 1); + setDataStreamFailureStoreClusterSetting(DATA_STREAM_NAME); // should have no effect as disabled in options assertDataStreamOptions(false); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); + setDataStreamFailureStoreClusterSetting("does-not-match-failure-data-stream"); + assertDataStreamOptions(false); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); + setDataStreamFailureStoreClusterSetting(null); + assertDataStreamOptions(false); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); } } @SuppressWarnings("unchecked") - private void assertFailureStore(boolean failureStoreEnabled, int failureStoreSize) throws IOException { + private void assertFailureStoreValuesInGetDataStreamResponse(boolean failureStoreEnabled, int failureStoreSize) throws IOException { final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); List dataStreams = (List) 
entityAsMap(dataStreamResponse).get("data_streams"); assertThat(dataStreams.size(), is(1)); @@ -198,4 +233,32 @@ private List getIndices(Map response) { List> indices = (List>) response.get("indices"); return indices.stream().map(index -> index.get("index_name")).toList(); } + + private static void setDataStreamFailureStoreClusterSetting(String value) throws IOException { + updateClusterSettings( + Settings.builder().put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), value).build() + ); + } + + private Response putDocumentWithBadMapping() throws IOException { + Request request = new Request("POST", DATA_STREAM_NAME + "/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "not a timestamp", + "foo": "bar" + } + """); + return client().performRequest(request); + } + + private void assertRedirectsDocWithBadMappingToFailureStore() throws IOException { + Response response = putDocumentWithBadMapping(); + String failureStoreResponse = (String) entityAsMap(response).get("failure_store"); + assertThat(failureStoreResponse, is("used")); + } + + private void assertFailsDocWithBadMapping() { + ResponseException e = assertThrows(ResponseException.class, this::putDocumentWithBadMapping); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(RestStatus.BAD_REQUEST.getStatus())); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java index ffa2447f5f5aa..2d310fef0be7e 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -64,6 +65,7 @@ public class TransportGetDataStreamsAction extends TransportMasterNodeReadAction private final SystemIndices systemIndices; private final ClusterSettings clusterSettings; private final DataStreamGlobalRetentionSettings globalRetentionSettings; + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; private final Client client; @Inject @@ -75,6 +77,7 @@ public TransportGetDataStreamsAction( IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, DataStreamGlobalRetentionSettings globalRetentionSettings, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings, Client client ) { super( @@ -91,6 +94,7 @@ public TransportGetDataStreamsAction( this.systemIndices = systemIndices; this.globalRetentionSettings = globalRetentionSettings; clusterSettings = clusterService.getClusterSettings(); + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; this.client = new OriginSettingClient(client, "stack"); } @@ -122,6 +126,7 @@ public void onResponse(DataStreamsStatsAction.Response response) { systemIndices, clusterSettings, globalRetentionSettings, + dataStreamFailureStoreSettings, maxTimestamps ) ); @@ -134,7 +139,16 @@ public void onFailure(Exception 
e) { }); } else { listener.onResponse( - innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, globalRetentionSettings, null) + innerOperation( + state, + request, + indexNameExpressionResolver, + systemIndices, + clusterSettings, + globalRetentionSettings, + dataStreamFailureStoreSettings, + null + ) ); } } @@ -146,11 +160,16 @@ static GetDataStreamAction.Response innerOperation( SystemIndices systemIndices, ClusterSettings clusterSettings, DataStreamGlobalRetentionSettings globalRetentionSettings, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings, @Nullable Map maxTimestamps ) { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); for (DataStream dataStream : dataStreams) { + // For this action, we are returning whether the failure store is effectively enabled, either in metadata or by cluster setting. + // Users can use the get data stream options API to find out whether it is explicitly enabled in metadata. + boolean failureStoreEffectivelyEnabled = DataStream.isFailureStoreFeatureFlagEnabled() + && dataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings); final String indexTemplate; boolean indexTemplatePreferIlmValue = true; String ilmPolicyName = null; @@ -254,6 +273,7 @@ public int compareTo(IndexInfo o) { dataStreamInfos.add( new GetDataStreamAction.Response.DataStreamInfo( dataStream, + failureStoreEffectivelyEnabled, streamHealth.getStatus(), indexTemplate, ilmPolicyName, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 710ea8c15b66e..9414943cbb439 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -102,6 +102,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti Response.DataStreamInfo dataStreamInfo = new Response.DataStreamInfo( logs, + true, ClusterHealthStatus.GREEN, "index-template", null, @@ -205,6 +206,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti Response.DataStreamInfo dataStreamInfo = new Response.DataStreamInfo( logs, + true, ClusterHealthStatus.GREEN, "index-template", null, @@ -282,6 +284,7 @@ public void testManagedByDisplayValuesDontAccidentalyChange() { private Response.DataStreamInfo mutateInstance(Response.DataStreamInfo instance) { var dataStream = instance.getDataStream(); + var failureStoreEffectivelyEnabled = instance.isFailureStoreEffectivelyEnabled(); var status = instance.getDataStreamStatus(); var indexTemplate = instance.getIndexTemplate(); var ilmPolicyName = instance.getIlmPolicy(); @@ -289,7 +292,7 @@ private Response.DataStreamInfo mutateInstance(Response.DataStreamInfo instance) var indexSettings = instance.getIndexSettingsValues(); var templatePreferIlm = instance.templatePreferIlmValue(); var maximumTimestamp = instance.getMaximumTimestamp(); - switch (randomIntBetween(0, 7)) { + switch (randomIntBetween(0, 8)) { case 0 -> dataStream = randomValueOtherThan(dataStream, DataStreamTestHelper::randomInstance); case 1 -> status = randomValueOtherThan(status, () -> randomFrom(ClusterHealthStatus.values())); case 2 -> indexTemplate = randomBoolean() && indexTemplate != 
null ? null : randomAlphaOfLengthBetween(2, 10); @@ -314,9 +317,11 @@ private Response.DataStreamInfo mutateInstance(Response.DataStreamInfo instance) case 7 -> maximumTimestamp = (maximumTimestamp == null) ? randomNonNegativeLong() : (usually() ? randomValueOtherThan(maximumTimestamp, ESTestCase::randomNonNegativeLong) : null); + case 8 -> failureStoreEffectivelyEnabled = failureStoreEffectivelyEnabled ? false : true; } return new Response.DataStreamInfo( dataStream, + failureStoreEffectivelyEnabled, status, indexTemplate, ilmPolicyName, @@ -355,6 +360,7 @@ private Response.DataStreamInfo generateRandomDataStreamInfo() { List> timeSeries = randomBoolean() ? generateRandomTimeSeries() : null; return new Response.DataStreamInfo( DataStreamTestHelper.randomInstance(), + randomBoolean(), ClusterHealthStatus.GREEN, randomAlphaOfLengthBetween(2, 10), randomAlphaOfLengthBetween(2, 10), diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java index 2efe881266c1b..ba4627f8955a1 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; @@ -39,6 +40,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; public class TransportGetDataStreamsActionTests extends ESTestCase { @@ -48,6 +51,9 @@ public class TransportGetDataStreamsActionTests extends ESTestCase { private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( ClusterSettings.createBuiltInClusterSettings() ); + private final DataStreamFailureStoreSettings emptyDataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings() + ); public void testGetDataStream() { final String dataStreamName = "my-data-stream"; @@ -166,6 +172,7 @@ public void testGetTimeSeriesDataStream() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat( @@ -197,6 +204,7 @@ public void testGetTimeSeriesDataStream() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat( @@ -248,6 +256,7 @@ public void testGetTimeSeriesDataStreamWithOutOfOrderIndices() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat( @@ -292,6 +301,7 @@ public void testGetTimeSeriesMixedDataStream() { systemIndices, 
ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); @@ -338,6 +348,7 @@ public void testPassingGlobalRetention() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat(response.getGlobalRetention(), nullValue()); @@ -363,8 +374,102 @@ public void testPassingGlobalRetention() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), withGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat(response.getGlobalRetention(), equalTo(globalRetention)); } + + public void testDataStreamIsFailureStoreEffectivelyEnabled_disabled() { + var metadata = new Metadata.Builder(); + DataStreamTestHelper.getClusterStateWithDataStreams( + metadata, + List.of(Tuple.tuple("data-stream-1", 2)), + List.of(), + System.currentTimeMillis(), + Settings.EMPTY, + 0, + false, + false + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); + var response = TransportGetDataStreamsAction.innerOperation( + state, + req, + resolver, + systemIndices, + ClusterSettings.createBuiltInClusterSettings(), + dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, + null + ); + assertThat(response.getDataStreams(), hasSize(1)); + assertThat(response.getDataStreams().getFirst().isFailureStoreEffectivelyEnabled(), is(false)); + } + + public void testDataStreamIsFailureStoreEffectivelyEnabled_enabledExplicitly() { + var metadata = new Metadata.Builder(); + DataStreamTestHelper.getClusterStateWithDataStreams( + metadata, + List.of(Tuple.tuple("data-stream-1", 2)), + List.of(), + System.currentTimeMillis(), + Settings.EMPTY, + 0, + false, + true + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); + var response = TransportGetDataStreamsAction.innerOperation( + state, + req, + resolver, + systemIndices, + ClusterSettings.createBuiltInClusterSettings(), + dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, + null + ); + assertThat(response.getDataStreams(), hasSize(1)); + assertThat(response.getDataStreams().getFirst().isFailureStoreEffectivelyEnabled(), is(true)); + } + + public void testDataStreamIsFailureStoreEffectivelyEnabled_enabledByClusterSetting() { + var metadata = new Metadata.Builder(); + DataStreamTestHelper.getClusterStateWithDataStreams( + metadata, + List.of(Tuple.tuple("data-stream-1", 2)), + List.of(), + System.currentTimeMillis(), + Settings.EMPTY, + 0, + false, + false + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); + var response = TransportGetDataStreamsAction.innerOperation( + state, + req, + resolver, + systemIndices, + ClusterSettings.createBuiltInClusterSettings(), + dataStreamGlobalRetentionSettings, + DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "data-stream-*") + .build() + ) + ), + null + ); + assertThat(response.getDataStreams(), hasSize(1)); + 
assertThat(response.getDataStreams().getFirst().isFailureStoreEffectivelyEnabled(), is(true)); + } } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_require_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_require_data_stream.yml similarity index 100% rename from modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_require_data_stream.yml rename to modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_require_data_stream.yml diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml similarity index 100% rename from modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml rename to modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml new file mode 100644 index 0000000000000..90bd6fe406b57 --- /dev/null +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml @@ -0,0 +1,222 @@ +setup: + - requires: + reason: "Data stream options was added in 8.18+" + test_runner_features: [ capabilities, allowed_warnings, contains ] + capabilities: + - method: POST + path: /{index}/_doc + capabilities: [ 'failure_store_status' ] + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] + - method: PUT + path: /_cluster/settings + capabilities: [ 'data_stream_failure_store_cluster_setting' ] + + - do: + cluster.put_settings: + body: + persistent: + data_streams.failure_store.enabled: '*-matching' + + - do: + ingest.put_pipeline: + id: "failing_pipeline" + body: > + { + "processors": [ + { + "fail": { + "message" : "pipeline go boom" + } + } + ] + } + + - do: + indices.put_index_template: + name: index_template_default_fs + body: + index_patterns: default-fs-* + data_stream: {} + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + - do: + cluster.put_component_template: + name: component_template_disabled_fs + body: + template: + data_stream_options: + failure_store: + enabled: false + + - do: + indices.put_index_template: + name: index_template_disabled_fs + body: + index_patterns: disabled-fs-* + data_stream: {} + composed_of: + - component_template_disabled_fs + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + +--- +teardown: + - do: + indices.delete_data_stream: + name: default-fs-matching + ignore: 404 + + - do: + indices.delete_data_stream: + name: disabled-fs-matching + ignore: 404 + + - do: + indices.delete_index_template: + name: index_template_disabled_fs + ignore: 404 + + - do: + cluster.delete_component_template: + name: component_template_disabled_fs + ignore: 404 + + - do: + indices.delete_index_template: + name: index_template_default_fs + ignore: 404 + + - do: + ingest.delete_pipeline: + id: "failing_pipeline" + ignore: 404 + + - do: + cluster.put_settings: + body: + persistent: + data_streams.failure_store.enabled: null + +--- +"Redirect ingest failure when 
auto-creating data stream to failure store when enabled by setting": + - do: + index: + index: default-fs-matching + refresh: true + pipeline: 'failing_pipeline' + body: + '@timestamp': '2020-12-12' + foo: bar + - match: { failure_store: used } + - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' } + +--- +"Redirect ingest failure into pre-existing data stream to failure store when enabled by setting": + - do: + indices.create_data_stream: + name: default-fs-matching + + - do: + index: + index: default-fs-matching + refresh: true + pipeline: 'failing_pipeline' + body: + '@timestamp': '2020-12-12' + foo: bar + - match: { failure_store: used } + - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' } + +--- +"Do not redirect ingest failure when auto-creating data stream to failure store when enabled by setting but disabled in template": + - do: + index: + index: disabled-fs-matching + refresh: true + pipeline: 'failing_pipeline' + body: + '@timestamp': '2020-12-12' + foo: bar + catch: '/pipeline go boom/' + +--- +"Do not redirect ingest failure into pre-existing data stream to failure store when enabled by setting but disabled in template": + - do: + indices.create_data_stream: + name: disabled-fs-matching + + - do: + index: + index: disabled-fs-matching + refresh: true + pipeline: 'failing_pipeline' + body: + '@timestamp': '2020-12-12' + foo: bar + catch: '/pipeline go boom/' + +--- +"Redirect mapping failure when auto-creating data stream to failure store when enabled by setting": + - do: + index: + index: default-fs-matching + refresh: true + body: + '@timestamp': 'not a timestamp' + foo: bar + - match: { failure_store: used } + - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' } + +--- +"Redirect mapping failure into pre-existing data stream to failure store when enabled by setting": + - do: + indices.create_data_stream: + name: default-fs-matching + + - do: + index: + index: default-fs-matching + refresh: true + body: + '@timestamp': 'not a timestamp' + foo: bar + - match: { failure_store: used } + - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' } + +--- +"Do not redirect mapping failure when auto-creating data stream to failure store when enabled by setting but disabled in template": + - do: + index: + index: disabled-fs-matching + refresh: true + body: + '@timestamp': 'not a timestamp' + foo: bar + catch: '/failed to parse field/' + +--- +"Do not redirect mapping failure into pre-existing data stream to failure store when enabled by setting but disabled in template": + - do: + indices.create_data_stream: + name: disabled-fs-matching + + - do: + index: + index: disabled-fs-matching + refresh: true + body: + '@timestamp': 'not a timestamp' + foo: bar + catch: '/failed to parse field/' + +# See also DataStreamOptionsIT for tests of the interaction between the failure store cluster setting and using +# the /_data_stream/{name}/_options API to explicitly enable and disable the failure store. (At time of writing, these +# can only be done in a Java REST test case, not a YAML one, because the failure store is behind a feature gate and so +# the REST API spec has not been added.) 
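The `'*-matching'` value in the setup above is a list of wildcard patterns matched against data stream names. The `DataStreamFailureStoreSettings` class added below compiles that list with `Regex.simpleMatcher`; the following sketch shows the resulting matching behaviour, where the class name and `main` harness are illustrative only:

```java
import org.elasticsearch.common.regex.Regex;

import java.util.function.Predicate;

// Illustrative sketch: how the data_streams.failure_store.enabled pattern list
// becomes a predicate over data stream names, mirroring setEnabledByNamePatterns.
public class FailureStorePatternSketch {
    public static void main(String[] args) {
        Predicate<String> enabledByName = Regex.simpleMatcher("*-matching");
        System.out.println(enabledByName.test("default-fs-matching"));  // true
        System.out.println(enabledByName.test("disabled-fs-matching")); // true, though the template's explicit option still disables the store
        System.out.println(enabledByName.test("some-other-stream"));    // false
    }
}
```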
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 9bc2487f89b12..ab8b66e765e91 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -144,6 +144,7 @@ static TransportVersion def(int id) { public static final TransportVersion EQL_ALLOW_PARTIAL_SEARCH_RESULTS = def(8_809_00_0); public static final TransportVersion NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION = def(8_810_00_0); public static final TransportVersion ERROR_TRACE_IN_TRANSPORT_HEADER = def(8_811_00_0); + public static final TransportVersion FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING = def(8_812_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index c233ed57b748e..cc96954c8a8e4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -480,13 +480,19 @@ public CreateIndexRequest requireDataStream(boolean requireDataStream) { return this; } + /** + * Returns whether the failure store should be initialized. N.B. If true, failure store index creation will be performed regardless of + * whether the template indicates that the failure store is enabled. + */ public boolean isInitializeFailureStore() { return initializeFailureStore; } /** * Set whether this CreateIndexRequest should initialize the failure store on data stream creation. This can be necessary when, for - * example, a failure occurs while trying to ingest a document into a data stream that has to be auto-created. + * example, a failure occurs while trying to ingest a document into a data stream that has to be auto-created. N.B. If true, failure + * store index creation will be performed regardless of whether the template indicates that the failure store is enabled. It is the + * caller's responsibility to ensure that this is correct. 
*/ public CreateIndexRequest initializeFailureStore(boolean initializeFailureStore) { this.initializeFailureStore = initializeFailureStore; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 4df228240add5..b137809047d18 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -98,6 +99,7 @@ final class BulkOperation extends ActionRunnable { private final Set failedRolloverRequests = ConcurrentCollections.newConcurrentSet(); private final Map shortCircuitShardFailures = ConcurrentCollections.newConcurrentMap(); private final FailureStoreMetrics failureStoreMetrics; + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; BulkOperation( Task task, @@ -111,7 +113,8 @@ final class BulkOperation extends ActionRunnable { LongSupplier relativeTimeProvider, long startTimeNanos, ActionListener listener, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { this( task, @@ -127,7 +130,8 @@ final class BulkOperation extends ActionRunnable { listener, new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), new FailureStoreDocumentConverter(), - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ); } @@ -145,7 +149,8 @@ final class BulkOperation extends ActionRunnable { ActionListener listener, ClusterStateObserver observer, FailureStoreDocumentConverter failureStoreDocumentConverter, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { super(listener); this.task = task; @@ -164,6 +169,7 @@ final class BulkOperation extends ActionRunnable { this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); this.shortCircuitShardFailures.putAll(bulkRequest.incrementalState().shardLevelFailures()); this.failureStoreMetrics = failureStoreMetrics; + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; } @Override @@ -544,7 +550,7 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques // Do not redirect documents to a failure store that were already headed to one. 
var isFailureStoreRequest = isFailureStoreRequest(docWriteRequest); if (isFailureStoreRequest == false - && failureStoreCandidate.isFailureStoreEnabled() + && failureStoreCandidate.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings) && error instanceof VersionConflictEngineException == false && error instanceof EsRejectedExecutionException == false) { // Prepare the data stream failure store if necessary @@ -577,7 +583,7 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques if (isFailureStoreRequest) { return IndexDocFailureStoreStatus.FAILED; } - if (failureStoreCandidate.isFailureStoreEnabled() == false) { + if (failureStoreCandidate.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings) == false) { return IndexDocFailureStoreStatus.NOT_ENABLED; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index e83bca4b661c9..24534826f8e3e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -343,9 +343,11 @@ public boolean isForceExecution() { * @param indexName The index name to check. * @param metadata Cluster state metadata. * @param epochMillis A timestamp to use when resolving date math in the index name. - * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store - * or if it matches a template that has a data stream failure store enabled. Returns false if the index name corresponds to a - * data stream, but it doesn't have the failure store enabled. Returns null when it doesn't correspond to a data stream. + * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store, or if it + * matches a template that has a data stream failure store enabled, or if it matches a data stream template with no failure store + * option specified and the name matches the cluster setting to enable the failure store. Returns false if the index name + * corresponds to a data stream, but it doesn't have the failure store enabled by one of those conditions. Returns null when it + * doesn't correspond to a data stream. 
*/ protected abstract Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index e2c73349b93ec..65264faf50129 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -35,6 +34,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -85,6 +85,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; private final FailureStoreMetrics failureStoreMetrics; + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; @Inject public TransportBulkAction( @@ -98,7 +99,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { this( threadPool, @@ -112,7 +114,8 @@ public TransportBulkAction( indexingPressure, systemIndices, threadPool::relativeTimeInNanos, - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ); } @@ -128,7 +131,8 @@ public TransportBulkAction( IndexingPressure indexingPressure, SystemIndices systemIndices, LongSupplier relativeTimeProvider, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { this( TYPE, @@ -144,7 +148,8 @@ public TransportBulkAction( indexingPressure, systemIndices, relativeTimeProvider, - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ); } @@ -162,7 +167,8 @@ public TransportBulkAction( IndexingPressure indexingPressure, SystemIndices systemIndices, LongSupplier relativeTimeProvider, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { super( bulkAction, @@ -176,6 +182,7 @@ public TransportBulkAction( systemIndices, relativeTimeProvider ); + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; Objects.requireNonNull(relativeTimeProvider); this.featureService = featureService; this.client = client; @@ -282,7 +289,7 @@ private void populateMissingTargets( for (DocWriteRequest request : bulkRequest.requests) { // Delete requests should not attempt to create the index (if the index does 
not exist), unless an external versioning is used. - if (request.opType() == OpType.DELETE + if (request.opType() == DocWriteRequest.OpType.DELETE && request.versionType() != VersionType.EXTERNAL && request.versionType() != VersionType.EXTERNAL_GTE) { continue; @@ -492,7 +499,7 @@ private void failRequestsWhenPrerequisiteActionFailed( static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, IndexAbstraction indexAbstraction) { DocWriteRequest.OpType opType = writeRequest.opType(); - if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { + if ((opType == DocWriteRequest.OpType.CREATE || opType == DocWriteRequest.OpType.INDEX) == false) { // op type not create or index, then bail early return; } @@ -588,7 +595,8 @@ void executeBulk( relativeTimeNanosProvider, startTimeNanos, listener, - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ).run(); } @@ -596,7 +604,7 @@ void executeBulk( * See {@link #resolveFailureStore(String, Metadata, long)} */ // Visibility for testing - static Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { + Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { return null; } @@ -604,7 +612,7 @@ static Boolean resolveFailureInternal(String indexName, Metadata metadata, long if (resolution != null) { return resolution; } - return resolveFailureStoreFromTemplate(indexName, metadata); + return resolveFailureStoreFromTemplate(indexName, metadata, epochMillis); } @Override @@ -619,7 +627,7 @@ protected Boolean resolveFailureStore(String indexName, Metadata metadata, long * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if the given index name corresponds to an existing data stream with a failure store enabled. */ - private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { + private Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { return null; } @@ -636,7 +644,7 @@ private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadat DataStream targetDataStream = DataStream.resolveDataStream(indexAbstraction, metadata); // We will store the failure if the write target belongs to a data stream with a failure store. - return targetDataStream != null && targetDataStream.isFailureStoreEnabled(); + return targetDataStream != null && targetDataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings); } /** @@ -644,18 +652,20 @@ private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadat * a data stream feature, the method returns true/false only if it is a data stream template, otherwise null. * @param indexName The index name to check. * @param metadata Cluster state metadata. - * @return true the associated index template has failure store enabled, false if the failure store is disabled or it's not specified, - * and null if the template is not a data stream template. - * Visible for testing + * @param epochMillis A timestamp to use when resolving date math in the index name. 
+ * @return true if the associated index template has failure store enabled, false if the failure store is disabled, true or false according + * to the cluster setting if there is a data stream template with no failure store option specified, and null if no template is + * found or if the template is not a data stream template. */ @Nullable - static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + private Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { return null; } // Check to see if the index name matches any templates such that an index would have been attributed // We don't check v1 templates at all because failure stores can only exist on data streams via a v2 template + // N.B. This currently does date math resolution itself and does *not* use epochMillis (it gets the system time again) String template = MetadataIndexTemplateService.findV2Template(metadata, indexName, false); if (template != null) { // Check if this is a data stream template or if it is just a normal index. @@ -666,7 +676,12 @@ static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metada composableIndexTemplate, metadata.componentTemplates() ).mapAndGet(DataStreamOptions.Template::toDataStreamOptions); - return dataStreamOptions != null && dataStreamOptions.isFailureStoreEnabled(); + return DataStream.isFailureStoreEffectivelyEnabled( + dataStreamOptions, + dataStreamFailureStoreSettings, + IndexNameExpressionResolver.resolveDateMathExpression(indexName, epochMillis), + systemIndices + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 93c40ad18cc8a..883fc543749c2 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -234,6 +234,7 @@ public static class DataStreamInfo implements SimpleDiffable<DataStreamInfo>, To private final DataStream dataStream; private final ClusterHealthStatus dataStreamStatus; + private final boolean failureStoreEffectivelyEnabled; // Must be serialized independently of dataStream as it depends on settings @Nullable private final String indexTemplate; @Nullable @@ -247,6 +248,7 @@ public static class DataStreamInfo implements SimpleDiffable<DataStreamInfo>, To public DataStreamInfo( DataStream dataStream, + boolean failureStoreEffectivelyEnabled, ClusterHealthStatus dataStreamStatus, @Nullable String indexTemplate, @Nullable String ilmPolicyName, @@ -256,6 +258,7 @@ public DataStreamInfo( @Nullable Long maximumTimestamp ) { this.dataStream = dataStream; + this.failureStoreEffectivelyEnabled = failureStoreEffectivelyEnabled; this.dataStreamStatus = dataStreamStatus; this.indexTemplate = indexTemplate; this.ilmPolicyName = ilmPolicyName; @@ -267,22 +270,32 @@ public DataStreamInfo( @SuppressWarnings("unchecked") DataStreamInfo(StreamInput in) throws IOException { - this( - DataStream.read(in), - ClusterHealthStatus.readFrom(in), - in.readOptionalString(), - in.readOptionalString(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null, - in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readMap(Index::new, IndexProperties::new) : Map.of(), - in.getTransportVersion().onOrAfter(V_8_11_X) ? 
in.readBoolean() : true, - in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalVLong() : null - ); + this.dataStream = DataStream.read(in); + this.failureStoreEffectivelyEnabled = in.getTransportVersion() + .onOrAfter(TransportVersions.FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING) + ? in.readBoolean() + : dataStream.isFailureStoreExplicitlyEnabled(); // Revert to the behaviour before this field was added + this.dataStreamStatus = ClusterHealthStatus.readFrom(in); + this.indexTemplate = in.readOptionalString(); + this.ilmPolicyName = in.readOptionalString(); + this.timeSeries = in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) + ? in.readOptionalWriteable(TimeSeries::new) + : null; + this.indexSettingsValues = in.getTransportVersion().onOrAfter(V_8_11_X) + ? in.readMap(Index::new, IndexProperties::new) + : Map.of(); + this.templatePreferIlmValue = in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readBoolean() : true; + this.maximumTimestamp = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalVLong() : null; } public DataStream getDataStream() { return dataStream; } + public boolean isFailureStoreEffectivelyEnabled() { + return failureStoreEffectivelyEnabled; + } + public ClusterHealthStatus getDataStreamStatus() { return dataStreamStatus; } @@ -318,6 +331,9 @@ public Long getMaximumTimestamp() { @Override public void writeTo(StreamOutput out) throws IOException { dataStream.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING)) { + out.writeBoolean(failureStoreEffectivelyEnabled); + } dataStreamStatus.writeTo(out); out.writeOptionalString(indexTemplate); out.writeOptionalString(ilmPolicyName); @@ -398,7 +414,7 @@ public XContentBuilder toXContent( } if (DataStream.isFailureStoreFeatureFlagEnabled()) { builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName()); - builder.field(FAILURE_STORE_ENABLED.getPreferredName(), dataStream.isFailureStoreEnabled()); + builder.field(FAILURE_STORE_ENABLED.getPreferredName(), failureStoreEffectivelyEnabled); builder.field( DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(), dataStream.getFailureIndices().isRolloverOnWrite() @@ -477,6 +493,7 @@ public boolean equals(Object o) { DataStreamInfo that = (DataStreamInfo) o; return templatePreferIlmValue == that.templatePreferIlmValue && Objects.equals(dataStream, that.dataStream) + && failureStoreEffectivelyEnabled == that.failureStoreEffectivelyEnabled && dataStreamStatus == that.dataStreamStatus && Objects.equals(indexTemplate, that.indexTemplate) && Objects.equals(ilmPolicyName, that.ilmPolicyName) @@ -490,6 +507,7 @@ public int hashCode() { return Objects.hash( dataStream, dataStreamStatus, + failureStoreEffectivelyEnabled, indexTemplate, ilmPolicyName, timeSeries, diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 9f4231c25dfca..4343451256920 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -908,12 +908,18 @@ public IndexRequest setRequireAlias(boolean requireAlias) { } /** - * Transient flag denoting that the local request should be routed to a failure store. Not persisted across the wire. + * Returns a transient flag denoting that the local request should be routed to a failure store. Not persisted across the wire. N.B. 
If + * true, the failure store will be used regardless of whether the metadata indicates that the failure store is enabled. */ public boolean isWriteToFailureStore() { return writeToFailureStore; } + /** + * Sets a transient flag denoting that the local request should be routed to a failure store. Not persisted across the wire. N.B. If + * true, the failure store will be used regardless of whether the metadata indicates that the failure store is enabled. It is the + * caller's responsibility to ensure that this is correct. + */ public IndexRequest setWriteToFailureStore(boolean writeToFailureStore) { this.writeToFailureStore = writeToFailureStore; return this; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index db602ef6ef291..c1b015dc3700b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -37,6 +37,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -117,6 +118,7 @@ public static boolean isFailureStoreFeatureFlagEnabled() { private final DataStreamIndices backingIndices; private final DataStreamIndices failureIndices; + // visible for testing public DataStream( String name, List indices, @@ -150,7 +152,6 @@ public DataStream( ); } - // visible for testing DataStream( String name, long generation, @@ -299,7 +300,15 @@ public boolean rolloverOnWrite() { * @return true if it's a system index or has a dot-prefixed name. */ public boolean isInternal() { - return isSystem() || name.charAt(0) == '.'; + return isSystem() || isDotPrefixName(name); + } + + private static boolean isInternalName(String name, SystemIndices systemIndices) { + return isDotPrefixName(name) || systemIndices.isSystemDataStream(name); + } + + private static boolean isDotPrefixName(String name) { + return name.charAt(0) == '.'; } /** @@ -418,12 +427,55 @@ public boolean isAllowCustomRouting() { } /** - * Determines if this data stream has its failure store enabled or not. Currently, the failure store - * is enabled only when a user has explicitly requested it. - * @return true, if the user has explicitly enabled the failure store. + * Determines whether this data stream has its failure store enabled explicitly in its metadata. */ - public boolean isFailureStoreEnabled() { - return dataStreamOptions.isFailureStoreEnabled(); + public boolean isFailureStoreExplicitlyEnabled() { + return dataStreamOptions.failureStore() != null && Boolean.TRUE.equals(dataStreamOptions.failureStore().enabled()); + } + + /** + * Returns whether this data stream has its failure store enabled, either explicitly in its metadata or implicitly via settings. + * + *
<p>
If the failure store is either explicitly enabled or explicitly disabled in its options metadata, that value is returned. If not, + * it checks whether its name matches one of the patterns in the settings, and that the data stream is not internal (i.e. neither a + * dot-prefixed nor a system data stream). + * + * @param dataStreamFailureStoreSettings The settings to use to determine whether the failure store should be implicitly enabled + */ + public boolean isFailureStoreEffectivelyEnabled(DataStreamFailureStoreSettings dataStreamFailureStoreSettings) { + return isFailureStoreEffectivelyEnabled(dataStreamOptions, dataStreamFailureStoreSettings, name, isInternal()); + } + + /** + * Returns whether a data stream has its failure store enabled, either explicitly in its metadata or implicitly via settings, based + * on the given parameters. The logic is equivalent to that in + * {@link #isFailureStoreEffectivelyEnabled(DataStreamFailureStoreSettings)}. + * + * @param options The {@link DataStreamOptions} for the data stream (which may be null) + * @param dataStreamFailureStoreSettings The settings to use to determine whether the failure store should be implicitly enabled + * @param name The name of the data stream + * @param systemIndices The {@link SystemIndices} instance to use to determine whether this is a system data stream + */ + public static boolean isFailureStoreEffectivelyEnabled( + @Nullable DataStreamOptions options, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings, + String name, + SystemIndices systemIndices + ) { + return isFailureStoreEffectivelyEnabled(options, dataStreamFailureStoreSettings, name, isInternalName(name, systemIndices)); + } + + private static boolean isFailureStoreEffectivelyEnabled( + DataStreamOptions options, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings, + String name, + boolean isInternal + ) { + if (options != null && options.failureStore() != null && options.failureStore().enabled() != null) { + return options.failureStore().enabled(); + } else { + return (isInternal == false) && dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName(name); + } } @Nullable @@ -1106,7 +1158,7 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getTransportVersion() .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) { - out.writeBoolean(isFailureStoreEnabled()); + out.writeBoolean(isFailureStoreExplicitlyEnabled()); } if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { out.writeCollection(failureIndices.indices); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettings.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettings.java new file mode 100644 index 0000000000000..c5076d01eabb0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettings.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; + +import java.util.List; +import java.util.function.Predicate; + +import static org.elasticsearch.core.Predicates.never; + +/** + * Holder for the data stream global settings relating to the data stream failure store. This defines, validates, and monitors the settings. + */ +public class DataStreamFailureStoreSettings { + + private static final Logger logger = LogManager.getLogger(DataStreamFailureStoreSettings.class); + + public static final Setting<List<String>> DATA_STREAM_FAILURE_STORED_ENABLED_SETTING = Setting.stringListSetting( + "data_streams.failure_store.enabled", + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private Predicate<String> failureStoreEnabledByName; + + private DataStreamFailureStoreSettings() { + failureStoreEnabledByName = never(); + } + + /** + * Creates an instance and initialises the cluster settings listeners. + * + * @param clusterSettings The cluster settings to initialize the instance from and to watch for updates to + */ + public static DataStreamFailureStoreSettings create(ClusterSettings clusterSettings) { + DataStreamFailureStoreSettings dataStreamFailureStoreSettings = new DataStreamFailureStoreSettings(); + if (DataStream.isFailureStoreFeatureFlagEnabled()) { + clusterSettings.initializeAndWatch( + DATA_STREAM_FAILURE_STORED_ENABLED_SETTING, + dataStreamFailureStoreSettings::setEnabledByNamePatterns + ); + } + return dataStreamFailureStoreSettings; + } + + /** + * Returns whether the settings indicate that the failure store should be enabled by the cluster settings for the given name. + * + * @param name The data stream name + */ + public boolean failureStoreEnabledForDataStreamName(String name) { + assert DataStream.isFailureStoreFeatureFlagEnabled() : "Testing whether failure store is enabled should be behind the feature flag"; + return failureStoreEnabledByName.test(name); + } + + private void setEnabledByNamePatterns(List<String> patterns) { + failureStoreEnabledByName = Regex.simpleMatcher(patterns.toArray(String[]::new)); + logger.info("Updated data stream name patterns for enabling failure store to [{}]", patterns); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java index 51e13c05e6892..423b698442581 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java @@ -70,16 +70,6 @@ public boolean isEmpty() { return failureStore == null; } - /** - * Determines if this data stream has its failure store enabled or not. Currently, the failure store - * is enabled only when a user has explicitly requested it. - * - * @return true, if the user has explicitly enabled the failure store. 
- */ - public boolean isFailureStoreEnabled() { - return failureStore != null && Boolean.TRUE.equals(failureStore.enabled()); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(failureStore); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 2ce91b66fa789..a2211cf8ea893 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -621,13 +621,11 @@ private static boolean shouldTrackConcreteIndex(Context context, Index index) { IndicesOptions options = context.getOptions(); if (DataStream.isFailureStoreFeatureFlagEnabled() && context.options.allowFailureIndices() == false) { DataStream parentDataStream = context.getState().metadata().getIndicesLookup().get(index.getName()).getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - if (parentDataStream.isFailureStoreIndex(index.getName())) { - if (options.ignoreUnavailable()) { - return false; - } else { - throw new FailureIndexNotSupportedException(index); - } + if (parentDataStream != null && parentDataStream.isFailureStoreIndex(index.getName())) { + if (options.ignoreUnavailable()) { + return false; + } else { + throw new FailureIndexNotSupportedException(index); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index a4fa139043e50..f8545c6cf1686 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -2486,7 +2486,6 @@ private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexM assert parent == null || parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())) || (DataStream.isFailureStoreFeatureFlagEnabled() - && parent.isFailureStoreEnabled() && parent.getFailureIndices() .getIndices() .stream() @@ -2512,7 +2511,7 @@ private static void collectDataStreams( for (Index i : dataStream.getIndices()) { indexToDataStreamLookup.put(i.getName(), dataStream); } - if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.isFailureStoreEnabled()) { + if (DataStream.isFailureStoreFeatureFlagEnabled()) { for (Index i : dataStream.getFailureIndices().getIndices()) { indexToDataStreamLookup.put(i.getName(), dataStream); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 0de87c7226380..254646f8e71a9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -205,7 +205,8 @@ static ClusterState createDataStream( * @param request The create data stream request * @param backingIndices List of backing indices. May be empty * @param writeIndex Write index for the data stream. If null, a new write index will be created. - * @param initializeFailureStore Whether the failure store should be initialized + * @param initializeFailureStore Whether the failure store should be initialized (N.B. 
if true, failure store index creation will be + * performed regardless of whether the template indicates that the failure store is enabled) * @return Cluster state containing the new data stream */ static ClusterState createDataStream( @@ -265,12 +266,13 @@ static ClusterState createDataStream( ? MetadataIndexTemplateService.resolveDataStreamOptions(template, systemDataStreamDescriptor.getComponentTemplates()) : MetadataIndexTemplateService.resolveDataStreamOptions(template, metadata.componentTemplates()); final DataStreamOptions dataStreamOptions = dataStreamOptionsTemplate.mapAndGet(DataStreamOptions.Template::toDataStreamOptions); - var isFailureStoreEnabled = dataStreamOptions != null && dataStreamOptions.isFailureStoreEnabled(); // If we need to create a failure store, do so first. Do not reroute during the creation since we will do - // that as part of creating the backing index if required. + // that as part of creating the backing index if required. N.B. This is done if initializeFailureStore, + // regardless of whether the template indicates that the failure store is enabled: it is the caller's + // responsibility to check that before setting. IndexMetadata failureStoreIndex = null; - if (isFailureStoreEnabled && initializeFailureStore) { + if (initializeFailureStore) { if (isSystem) { throw new IllegalArgumentException("Failure stores are not supported on system data streams"); } @@ -308,8 +310,7 @@ static ClusterState createDataStream( } assert writeIndex != null; assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]"; - assert isFailureStoreEnabled == false || initializeFailureStore == false || failureStoreIndex != null - : "failure store should have an initial index"; + assert initializeFailureStore == false || failureStoreIndex != null : "failure store should have an initial index"; assert failureStoreIndex == null || failureStoreIndex.mapping() != null : "no mapping found for failure store [" + failureStoreIndex.getIndex().getName() + "]"; diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index a01571b8c237d..aecc750bd4e39 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -37,6 +37,8 @@ import org.elasticsearch.cluster.coordination.MasterHistory; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.coordination.Reconfigurator; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexGraveyard; @@ -133,8 +135,12 @@ import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.watcher.ResourceWatcherService; +import java.util.Objects; import java.util.Set; import java.util.function.Predicate; +import java.util.stream.Stream; + +import static java.util.stream.Collectors.toSet; /** * Encapsulates all valid cluster level settings. 
@@ -205,7 +211,7 @@ public void apply(Settings value, Settings current, Settings previous) { } } - public static final Set> BUILT_IN_CLUSTER_SETTINGS = Set.of( + public static final Set> BUILT_IN_CLUSTER_SETTINGS = Stream.of( AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, @@ -616,6 +622,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE, DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING, DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING, - ShardsAvailabilityHealthIndicatorService.REPLICA_UNASSIGNED_BUFFER_TIME - ); + ShardsAvailabilityHealthIndicatorService.REPLICA_UNASSIGNED_BUFFER_TIME, + DataStream.isFailureStoreFeatureFlagEnabled() ? DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING : null + ).filter(Objects::nonNull).collect(toSet()); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 5cfe1c104d45e..212820594d43e 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.MasterHistoryService; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -627,7 +628,6 @@ private ScriptService createScriptService(SettingsModule settingsModule, ThreadP } private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalRetentionResolver( - Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, @@ -637,6 +637,10 @@ private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalReten clusterService.getClusterSettings() ); modules.bindToInstance(DataStreamGlobalRetentionSettings.class, dataStreamGlobalRetentionSettings); + modules.bindToInstance( + DataStreamFailureStoreSettings.class, + DataStreamFailureStoreSettings.create(clusterService.getClusterSettings()) + ); modules.bindToInstance( MetadataCreateDataStreamService.class, new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService) @@ -859,7 +863,6 @@ private void construct( ); final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = createDataStreamServicesAndGlobalRetentionResolver( - settings, threadPool, clusterService, indicesService, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index cd154d2e5c50a..b4641a49d6977 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -33,6 +34,9 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { private static final String PERSISTENT = "persistent"; private static final String TRANSIENT = "transient"; + // TODO: Remove this and use a single cluster feature / capability for the whole failure store feature when the feature flag is removed + private static final String DATA_STREAM_FAILURE_STORE_CLUSTER_SETTING_CAPABILITY = "data_stream_failure_store_cluster_setting"; + @Override public List routes() { return List.of(new Route(PUT, "/_cluster/settings")); @@ -73,4 +77,9 @@ protected Set responseParams() { public boolean canTripCircuitBreaker() { return false; } + + @Override + public Set supportedCapabilities() { + return DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(DATA_STREAM_FAILURE_STORE_CLUSTER_SETTING_CAPABILITY) : Set.of(); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 1a30fae1ebc00..eb034196f0d38 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -753,7 +753,7 @@ public void testValidation() throws Exception { // ensure no replicate data stream .promoteDataStream(); rolloverTarget = dataStream.getName(); - if (dataStream.isFailureStoreEnabled() && randomBoolean()) { + if (dataStream.isFailureStoreExplicitlyEnabled() && randomBoolean()) { defaultSelectorOptions = IndicesOptions.SelectorOptions.FAILURES; sourceIndexName = dataStream.getFailureStoreWriteIndex().getName(); defaultRolloverIndexName = DataStream.getDefaultFailureStoreName( diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index 9a12b05d1cfd8..9360ce1719634 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -40,6 +41,7 @@ import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -89,6 +91,7 @@ public class BulkOperationTests extends ESTestCase { private final String dataStreamName = "my_data_stream"; private final 
String fsDataStreamName = "my_failure_store_data_stream"; private final String fsRolloverDataStreamName = "my_failure_store_to_be_rolled_over_data_stream"; + private final String fsBySettingsDataStreamName = "my_failure_store_enabled_by_setting_data_stream"; private final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) .settings( @@ -120,6 +123,12 @@ public class BulkOperationTests extends ESTestCase { private final IndexMetadata ds3FailureStore2 = DataStreamTestHelper.createFailureStore(fsRolloverDataStreamName, 2, millis) .numberOfShards(1) .build(); + private final IndexMetadata ds4BackingIndex1 = DataStreamTestHelper.createBackingIndex(fsBySettingsDataStreamName, 1, millis) + .numberOfShards(2) + .build(); + private final IndexMetadata ds4FailureStore1 = DataStreamTestHelper.createFailureStore(fsBySettingsDataStreamName, 1, millis) + .numberOfShards(1) + .build(); private final DataStream dataStream1 = DataStreamTestHelper.newInstance( dataStreamName, @@ -137,6 +146,11 @@ public class BulkOperationTests extends ESTestCase { DataStream.DataStreamIndices.failureIndicesBuilder(List.of(ds3FailureStore1.getIndex())).setRolloverOnWrite(true).build() ) .build(); + private final DataStream dataStream4 = DataStream.builder(fsBySettingsDataStreamName, List.of(ds4BackingIndex1.getIndex())) + .setGeneration(1) + .setDataStreamOptions(DataStreamOptions.EMPTY) + .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(List.of(ds4FailureStore1.getIndex())).build()) + .build(); private final ClusterState DEFAULT_STATE = ClusterState.builder(ClusterName.DEFAULT) .metadata( @@ -172,11 +186,24 @@ public class BulkOperationTests extends ESTestCase { ds3BackingIndex1.getIndex().getName(), ds3BackingIndex1, ds3FailureStore1.getIndex().getName(), - ds3FailureStore1 + ds3FailureStore1, + ds4BackingIndex1.getIndex().getName(), + ds4BackingIndex1, + ds4FailureStore1.getIndex().getName(), + ds4FailureStore1 ) ) .dataStreams( - Map.of(dataStreamName, dataStream1, fsDataStreamName, dataStream2, fsRolloverDataStreamName, dataStream3), + Map.of( + dataStreamName, + dataStream1, + fsDataStreamName, + dataStream2, + fsRolloverDataStreamName, + dataStream3, + fsBySettingsDataStreamName, + dataStream4 + ), Map.of() ) .build() @@ -409,6 +436,58 @@ public void testFailingDocumentRedirectsToFailureStore() throws Exception { assertThat(failedItem.getFailureStoreStatus(), equalTo(IndexDocFailureStoreStatus.USED)); } + public void testFailingDocumentRedirectsToFailureStoreWhenEnabledByClusterSetting() { + Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled()); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add( + new IndexRequest(fsBySettingsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE) + ); + + NodeClient client = getNodeClient( + thatFailsDocuments( + Map.of(new IndexAndId(ds4BackingIndex1.getIndex().getName(), "1"), () -> new MapperException("mapping go boom")) + ) + ); + DataStreamFailureStoreSettings dataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), fsBySettingsDataStreamName) + .build() + ) + ); + + // Without the cluster setting, this bulk request should fail: + BulkResponse bulkItemResponsesWithoutClusterSetting = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); + assertThat(bulkItemResponsesWithoutClusterSetting.hasFailures(), 
is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponsesWithoutClusterSetting.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find failed item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("mapping go boom"))); + + // With a cluster setting to enable the failure store for this data stream, the same request should be redirected: + BulkResponse bulkItemResponsesUsingClusterSetting = safeAwait( + l -> newBulkOperation( + DEFAULT_STATE, + client, + bulkRequest, + new AtomicArray<>(bulkRequest.numberOfActions()), + mockObserver(DEFAULT_STATE), + l, + new FailureStoreDocumentConverter(), + dataStreamFailureStoreSettings + ).run() + ); + assertThat(bulkItemResponsesUsingClusterSetting.hasFailures(), is(false)); + BulkItemResponse redirectedItem = Arrays.stream(bulkItemResponsesUsingClusterSetting.getItems()) + .filter(item -> item.getIndex().equals(ds4FailureStore1.getIndex().getName())) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(redirectedItem.getFailureStoreStatus(), equalTo(IndexDocFailureStoreStatus.USED)); + } + /** * A bulk operation to a data stream with a failure store enabled may still partially fail if the redirected documents experience * a shard-level failure while writing to the failure store indices. @@ -957,6 +1036,7 @@ private static BulkItemResponse requestToFailedResponse(BulkItemRequest itemRequ /** * Create a client that redirects expected actions to the provided function and fails if an unexpected operation happens. + * * @param onShardAction Called when TransportShardBulkAction is executed. * @return A node client for the test. 
*/ @@ -966,6 +1046,7 @@ private NodeClient getNodeClient(BiConsumer listener, FailureStoreDocumentConverter failureStoreDocumentConverter + ) { + return newBulkOperation( + state, + client, + request, + existingResponses, + observer, + listener, + failureStoreDocumentConverter, + DataStreamFailureStoreSettings.create(ClusterSettings.createBuiltInClusterSettings()) + ); + } + + private BulkOperation newBulkOperation( + ClusterState state, + NodeClient client, + BulkRequest request, + AtomicArray existingResponses, + ClusterStateObserver observer, + ActionListener listener, + FailureStoreDocumentConverter failureStoreDocumentConverter, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { // Time provision long timeZero = TimeUnit.MILLISECONDS.toNanos(randomMillisUpToYear9999() - TimeUnit.DAYS.toMillis(1)); @@ -1105,7 +1208,8 @@ private BulkOperation newBulkOperation( listener, observer, failureStoreDocumentConverter, - FailureStoreMetrics.NOOP + FailureStoreMetrics.NOOP, + dataStreamFailureStoreSettings ); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 96b62056b6dc4..50885fc399c89 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; @@ -36,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriConsumer; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Nullable; @@ -157,7 +159,8 @@ class TestTransportBulkAction extends TransportBulkAction { TestIndexNameExpressionResolver.newInstance(), new IndexingPressure(SETTINGS), EmptySystemIndices.INSTANCE, - FailureStoreMetrics.NOOP + FailureStoreMetrics.NOOP, + DataStreamFailureStoreSettings.create(ClusterSettings.createBuiltInClusterSettings()) ); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 6bc08995b932e..0032093459a0d 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; +import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexAbstraction.ConcreteIndex; @@ -39,6 
+41,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.features.FeatureService; @@ -84,6 +87,8 @@ public class TransportBulkActionTests extends ESTestCase { + private final ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + /** Services needed by bulk action */ private TransportService transportService; private ClusterService clusterService; @@ -112,7 +117,8 @@ class TestTransportBulkAction extends TransportBulkAction { new Resolver(), new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, - FailureStoreMetrics.NOOP + FailureStoreMetrics.NOOP, + DataStreamFailureStoreSettings.create(clusterSettings) ); } @@ -392,36 +398,52 @@ public void testRejectionAfterCreateIndexIsPropagated() { public void testResolveFailureStoreFromMetadata() throws Exception { assumeThat(DataStream.isFailureStoreFeatureFlagEnabled(), is(true)); - String dataStreamWithFailureStore = "test-data-stream-failure-enabled"; - String dataStreamWithoutFailureStore = "test-data-stream-failure-disabled"; + String dataStreamWithFailureStoreEnabled = "test-data-stream-failure-enabled"; + String dataStreamWithFailureStoreDefault = "test-data-stream-failure-default"; + String dataStreamWithFailureStoreDisabled = "test-data-stream-failure-disabled"; long testTime = randomMillisUpToYear9999(); - IndexMetadata backingIndex1 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithFailureStore, testTime).build(); - IndexMetadata backingIndex2 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithoutFailureStore, testTime).build(); - IndexMetadata failureStoreIndex1 = DataStreamTestHelper.createFirstFailureStore(dataStreamWithFailureStore, testTime).build(); + IndexMetadata backingIndex1 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithFailureStoreEnabled, testTime).build(); + IndexMetadata backingIndex2 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithFailureStoreDefault, testTime).build(); + IndexMetadata backingIndex3 = DataStreamTestHelper.createFirstBackingIndex(dataStreamWithFailureStoreDisabled, testTime).build(); + IndexMetadata failureStoreIndex1 = DataStreamTestHelper.createFirstFailureStore(dataStreamWithFailureStoreEnabled, testTime) + .build(); Metadata metadata = Metadata.builder() .dataStreams( Map.of( - dataStreamWithFailureStore, + dataStreamWithFailureStoreEnabled, DataStreamTestHelper.newInstance( - dataStreamWithFailureStore, + dataStreamWithFailureStoreEnabled, List.of(backingIndex1.getIndex()), 1L, Map.of(), false, null, - List.of(failureStoreIndex1.getIndex()) + List.of(), + DataStreamOptions.FAILURE_STORE_ENABLED ), - dataStreamWithoutFailureStore, + dataStreamWithFailureStoreDefault, DataStreamTestHelper.newInstance( - dataStreamWithoutFailureStore, + dataStreamWithFailureStoreDefault, List.of(backingIndex2.getIndex()), 1L, Map.of(), false, null, - List.of() + List.of(), + DataStreamOptions.EMPTY + ), + dataStreamWithFailureStoreDisabled, + DataStreamTestHelper.newInstance( + dataStreamWithFailureStoreDisabled, + List.of(backingIndex3.getIndex()), + 1L, + Map.of(), + false, + null, + List.of(), + DataStreamOptions.FAILURE_STORE_DISABLED ) ), Map.of() @@ -432,6 +454,8 @@ public void 
testResolveFailureStoreFromMetadata() throws Exception { backingIndex1, backingIndex2.getIndex().getName(), backingIndex2, + backingIndex3.getIndex().getName(), + backingIndex3, failureStoreIndex1.getIndex().getName(), failureStoreIndex1 ) @@ -439,38 +463,56 @@ public void testResolveFailureStoreFromMetadata() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true)); - // Data stream without failure store should not - assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false)); + assertThat(bulkAction.resolveFailureInternal(dataStreamWithFailureStoreEnabled, metadata, testTime), is(true)); + // Data stream with the default failure store options should not... + assertThat(bulkAction.resolveFailureInternal(dataStreamWithFailureStoreDefault, metadata, testTime), is(false)); + // ...unless we change the cluster setting to enable it that way. + clusterSettings.applySettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), dataStreamWithFailureStoreDefault) + .build() + ); + assertThat(bulkAction.resolveFailureInternal(dataStreamWithFailureStoreDefault, metadata, testTime), is(true)); + // Data stream with failure store explicitly disabled should not store failures even if it matches the cluster setting + clusterSettings.applySettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), dataStreamWithFailureStoreDisabled) + .build() + ); + assertThat(bulkAction.resolveFailureInternal(dataStreamWithFailureStoreDisabled, metadata, testTime), is(false)); // An index should not be considered for failure storage - assertThat(TransportBulkAction.resolveFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(nullValue())); + assertThat(bulkAction.resolveFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(nullValue())); // even if that index is itself a failure store - assertThat( - TransportBulkAction.resolveFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), - is(nullValue()) - ); + assertThat(bulkAction.resolveFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(nullValue())); } public void testResolveFailureStoreFromTemplate() throws Exception { assumeThat(DataStream.isFailureStoreFeatureFlagEnabled(), is(true)); - String dsTemplateWithFailureStore = "test-data-stream-failure-enabled"; - String dsTemplateWithoutFailureStore = "test-data-stream-failure-disabled"; + String dsTemplateWithFailureStoreEnabled = "test-data-stream-failure-enabled"; + String dsTemplateWithFailureStoreDefault = "test-data-stream-failure-default"; + String dsTemplateWithFailureStoreDisabled = "test-data-stream-failure-disabled"; String indexTemplate = "test-index"; long testTime = randomMillisUpToYear9999(); Metadata metadata = Metadata.builder() .indexTemplates( Map.of( - dsTemplateWithFailureStore, + dsTemplateWithFailureStoreEnabled, ComposableIndexTemplate.builder() - .indexPatterns(List.of(dsTemplateWithFailureStore + "-*")) + .indexPatterns(List.of(dsTemplateWithFailureStoreEnabled + "-*")) .template(Template.builder().dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(true))) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(), - 
dsTemplateWithoutFailureStore, + dsTemplateWithFailureStoreDefault, + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dsTemplateWithFailureStoreDefault + "-*")) + .template(Template.builder().dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(null))) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), + dsTemplateWithFailureStoreDisabled, ComposableIndexTemplate.builder() - .indexPatterns(List.of(dsTemplateWithoutFailureStore + "-*")) + .indexPatterns(List.of(dsTemplateWithFailureStoreDisabled + "-*")) .template(Template.builder().dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(false))) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(), @@ -481,11 +523,36 @@ public void testResolveFailureStoreFromTemplate() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); - // Data stream without failure store should not - assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); + assertThat(bulkAction.resolveFailureInternal(dsTemplateWithFailureStoreEnabled + "-1", metadata, testTime), is(true)); + // Same if date math is used + assertThat(bulkAction.resolveFailureInternal("<" + dsTemplateWithFailureStoreEnabled + "-{now}>", metadata, testTime), is(true)); + // Data stream with the default failure store options should not... + assertThat(bulkAction.resolveFailureInternal(dsTemplateWithFailureStoreDefault + "-1", metadata, testTime), is(false)); + assertThat(bulkAction.resolveFailureInternal("<" + dsTemplateWithFailureStoreDefault + "-{now}>", metadata, testTime), is(false)); + // ...unless we change the cluster setting to enable it that way. 
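+ // (applySettings runs the registered setting-update consumers synchronously, so the new patterns take effect before the next assertion.)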
+ clusterSettings.applySettings( + Settings.builder() + .put( + DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), + dsTemplateWithFailureStoreDefault + "*" + ) + .build() + ); + assertThat(bulkAction.resolveFailureInternal(dsTemplateWithFailureStoreDefault + "-1", metadata, testTime), is(true)); + assertThat(bulkAction.resolveFailureInternal("<" + dsTemplateWithFailureStoreDefault + "-{now}>", metadata, testTime), is(true)); + // Data stream with failure store explicitly disabled should not store failures even if it matches the cluster setting + clusterSettings.applySettings( + Settings.builder() + .put( + DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), + dsTemplateWithFailureStoreDisabled + "*" + ) + .build() + ); + assertThat(bulkAction.resolveFailureInternal(dsTemplateWithFailureStoreDisabled + "-1", metadata, testTime), is(false)); + assertThat(bulkAction.resolveFailureInternal("<" + dsTemplateWithFailureStoreDisabled + "-{now}>", metadata, testTime), is(false)); // An index template should not be considered for failure storage - assertThat(TransportBulkAction.resolveFailureInternal(indexTemplate + "-1", metadata, testTime), is(nullValue())); + assertThat(bulkAction.resolveFailureInternal(indexTemplate + "-1", metadata, testTime), is(nullValue())); } /** @@ -558,38 +625,6 @@ public void testFailuresDuringPrerequisiteActions() throws InterruptedException assertNull(bulkRequest.requests.get(2)); } - public void testFailureStoreFromTemplateResolution() { - Metadata metadata = Metadata.builder() - .indexTemplates( - Map.of( - "my-index-template", - ComposableIndexTemplate.builder().indexPatterns(List.of("my-index*")).build(), - "my-enabled-fs-template", - ComposableIndexTemplate.builder() - .indexPatterns(List.of("my-enabled*")) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) - .template(Template.builder().dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(true))) - .build(), - "my-disabled-fs-template", - ComposableIndexTemplate.builder() - .indexPatterns(List.of("my-disabled*")) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) - .template(Template.builder().dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(false))) - .build(), - "my-no-fs-template", - ComposableIndexTemplate.builder() - .indexPatterns(List.of("my-no*")) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) - .build() - ) - ) - .build(); - assertThat(TransportBulkAction.resolveFailureStoreFromTemplate("my-index", metadata), nullValue()); - assertThat(TransportBulkAction.resolveFailureStoreFromTemplate("my-enabled-fs", metadata), equalTo(true)); - assertThat(TransportBulkAction.resolveFailureStoreFromTemplate("my-disabled-fs", metadata), equalTo(false)); - assertThat(TransportBulkAction.resolveFailureStoreFromTemplate("my-no-fs", metadata), equalTo(false)); - } - private BulkRequest buildBulkRequest(List indices) { BulkRequest request = new BulkRequest(); for (String index : indices) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index b3d3ebe5e1357..2f033e4b5a383 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -21,11 +21,13 @@ import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -252,7 +254,8 @@ static class TestTransportBulkAction extends TransportBulkAction { new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, relativeTimeProvider, - FailureStoreMetrics.NOOP + FailureStoreMetrics.NOOP, + DataStreamFailureStoreSettings.create(ClusterSettings.createBuiltInClusterSettings()) ); } } diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/GetDataStreamActionTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/GetDataStreamActionTests.java index b0b3531f54b48..feb00728c858e 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/GetDataStreamActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/datastreams/GetDataStreamActionTests.java @@ -85,6 +85,7 @@ private static GetDataStreamAction.Response.DataStreamInfo newDataStreamInfo(boo DataStream dataStream = newDataStreamInstance(isSystem, retention); return new GetDataStreamAction.Response.DataStreamInfo( dataStream, + randomBoolean(), randomFrom(ClusterHealthStatus.values()), null, null, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettingsTests.java new file mode 100644 index 0000000000000..709d026cae034 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreSettingsTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.stream.Stream; + +import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiAlphanumOfLengthBetween; +import static org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING; +import static org.hamcrest.Matchers.is; + +public class DataStreamFailureStoreSettingsTests extends ESTestCase { + + public void testFailureStoreEnabledForDataStreamName_defaultSettings() { + DataStreamFailureStoreSettings dataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings() + ); + + // The default should return false for any input. 
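+ // (The setting defaults to an empty pattern list, so the matcher can never return true out of the box.)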
+ // The following will include some illegal names, but it's still valid to test how the method treats them. + Stream.generate(() -> randomAsciiAlphanumOfLengthBetween(random(), 1, 20)) + .limit(100) + .forEach(name -> assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName(name), is(false))); + Stream.generate(() -> randomUnicodeOfLengthBetween(1, 20)) + .limit(100) + .forEach(name -> assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName(name), is(false))); + } + + public void testFailureStoreEnabledForDataStreamName_exactMatches() { + DataStreamFailureStoreSettings dataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + // Match exactly 'foo' and 'bar' — whitespace should be stripped: + Settings.builder().put(DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), " foo , bar ").build() + ) + ); + + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("foo"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("bar"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("food"), is(false)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("tbar"), is(false)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName(".foo"), is(false)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("barf"), is(false)); + } + + public void testFailureStoreEnabledForDataStreamName_wildcardMatches() { + DataStreamFailureStoreSettings dataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder().put(DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), " foo* , *bar , a*z ").build() + ) + ); + + // These tests aren't exhaustive as the library used is tested thoroughly, but they provide a basic check of the correct usage: + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("foo"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("bar"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("food"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("tbar"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("az"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("a123z"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName(".foo"), is(false)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("barf"), is(false)); + } + + public void testFailureStoreEnabledForDataStreamName_respondsToSettingsChange() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings( + Settings.builder().put(DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "foo").build() + ); + DataStreamFailureStoreSettings dataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create(clusterSettings); + + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("foo"), is(true)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("bar"), is(false)); + + clusterSettings.applySettings(Settings.builder().put(DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "bar").build()); + + 
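+ // The dynamic update swaps in a new matcher immediately: "foo" stops matching and "bar" starts.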
assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("foo"), is(false)); + assertThat(dataStreamFailureStoreSettings.failureStoreEnabledForDataStreamName("bar"), is(true)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index cfdcfe48c8d9a..f7f299683c3fc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -27,6 +28,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.ESTestCase; @@ -66,6 +68,8 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class DataStreamTests extends AbstractXContentSerializingTestCase { @@ -2280,6 +2284,157 @@ public void testInternalDataStream() { assertThat(dotPrefixedDataStream.isInternal(), is(true)); } + public void testIsFailureStoreExplicitlyEnabled() { + DataStream dataStreamNoFailureStoreOptions = createTestInstance().copy().setDataStreamOptions(DataStreamOptions.EMPTY).build(); + DataStream dataStreamFailureStoreDisabled = createTestInstance().copy() + .setDataStreamOptions(DataStreamOptions.FAILURE_STORE_DISABLED) + .build(); + DataStream dataStreamFailureStoreEnabled = createTestInstance().copy() + .setDataStreamOptions(DataStreamOptions.FAILURE_STORE_ENABLED) + .build(); + assertThat(dataStreamNoFailureStoreOptions.isFailureStoreExplicitlyEnabled(), is(false)); + assertThat(dataStreamFailureStoreDisabled.isFailureStoreExplicitlyEnabled(), is(false)); + assertThat(dataStreamFailureStoreEnabled.isFailureStoreExplicitlyEnabled(), is(true)); + } + + public void testIsFailureStoreEffectivelyEnabled_instanceMethod() { + DataStream dataStreamNoFailureStoreOptions = createTestInstance().copy() + .setName("my-data-stream-no-failure-store-options") + .setDataStreamOptions(DataStreamOptions.EMPTY) + .build(); + DataStream dataStreamFailureStoreExplicitlyDisabled = createTestInstance().copy() + .setName("my-data-stream-failure-store-explicitly-disabled") + .setDataStreamOptions(DataStreamOptions.FAILURE_STORE_DISABLED) + .build(); + DataStream dataStreamFailureStoreExplicitlyEnabled = createTestInstance().copy() + .setName("my-data-stream-failure-store-explicitly-enabled") + .setDataStreamOptions(DataStreamOptions.FAILURE_STORE_ENABLED) + .build(); + DataStream dotPrefixDataStreamNoFailureStoreOptions = createTestInstance().copy() + .setName(".my-data-stream-no-failure-store-options") + .setDataStreamOptions(DataStreamOptions.EMPTY) + .build(); + DataStream systemDataStreamNoFailureStoreOptions = createTestInstance().copy() + 
.setName("my-data-stream-system-no-failure-store-options") + .setDataStreamOptions(DataStreamOptions.EMPTY) + .setSystem(true) + .setHidden(true) // system indices must be hidden + .build(); + DataStreamFailureStoreSettings matchingSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put( + DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), + String.join(",", "my-data-stream-*", ".my-data-stream-*") + ) + .build() + ) + ); + DataStreamFailureStoreSettings nonMatchingSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "not-my-data-stream-*") + .build() + ) + ); + assertThat(dataStreamNoFailureStoreOptions.isFailureStoreEffectivelyEnabled(matchingSettings), is(true)); + assertThat(dataStreamNoFailureStoreOptions.isFailureStoreEffectivelyEnabled(nonMatchingSettings), is(false)); + assertThat(dataStreamFailureStoreExplicitlyDisabled.isFailureStoreEffectivelyEnabled(matchingSettings), is(false)); + assertThat(dataStreamFailureStoreExplicitlyDisabled.isFailureStoreEffectivelyEnabled(nonMatchingSettings), is(false)); + assertThat(dataStreamFailureStoreExplicitlyEnabled.isFailureStoreEffectivelyEnabled(matchingSettings), is(true)); + assertThat(dataStreamFailureStoreExplicitlyEnabled.isFailureStoreEffectivelyEnabled(nonMatchingSettings), is(true)); + assertThat(dotPrefixDataStreamNoFailureStoreOptions.isFailureStoreEffectivelyEnabled(matchingSettings), is(false)); + assertThat(dotPrefixDataStreamNoFailureStoreOptions.isFailureStoreEffectivelyEnabled(nonMatchingSettings), is(false)); + assertThat(systemDataStreamNoFailureStoreOptions.isFailureStoreEffectivelyEnabled(matchingSettings), is(false)); + assertThat(systemDataStreamNoFailureStoreOptions.isFailureStoreEffectivelyEnabled(nonMatchingSettings), is(false)); + } + + public void testIsFailureStoreEffectivelyEnabled_staticHelperMethod() { + String regularDataStreamName = "my-data-stream"; + String dotPrefixedDataStreamName = ".my-dot-prefixed-data-stream"; + String systemDataStreamName = "my-system-data-stream-name"; + DataStreamFailureStoreSettings matchingSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put( + DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), + String.join(",", regularDataStreamName, dotPrefixedDataStreamName, systemDataStreamName) + ) + .build() + ) + ); + DataStreamFailureStoreSettings nonMatchingSettings = DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "not-my-data-stream") + .build() + ) + ); + // At time of writing, SystemDataStreamDescriptor does not allow us to declare system data streams which aren't also dot-prefixed. + // But we code defensively to do the system data stream and dot-prefix tests independently, as implied in the requirements. + // We use a mock SystemIndices instance for testing, so that we can make it treat a non-dot-prefixed name as a system data stream. 
+ SystemIndices systemIndices = mock(SystemIndices.class); + when(systemIndices.isSystemDataStream(systemDataStreamName)).thenReturn(true); + + assertThat( + DataStream.isFailureStoreEffectivelyEnabled(DataStreamOptions.EMPTY, matchingSettings, regularDataStreamName, systemIndices), + is(true) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled(DataStreamOptions.EMPTY, nonMatchingSettings, regularDataStreamName, systemIndices), + is(false) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled( + DataStreamOptions.EMPTY, + matchingSettings, + dotPrefixedDataStreamName, + systemIndices + ), + is(false) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled(DataStreamOptions.EMPTY, matchingSettings, systemDataStreamName, systemIndices), + is(false) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled( + DataStreamOptions.FAILURE_STORE_DISABLED, + matchingSettings, + regularDataStreamName, + systemIndices + ), + is(false) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled( + DataStreamOptions.FAILURE_STORE_DISABLED, + nonMatchingSettings, + regularDataStreamName, + systemIndices + ), + is(false) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled( + DataStreamOptions.FAILURE_STORE_ENABLED, + matchingSettings, + regularDataStreamName, + systemIndices + ), + is(true) + ); + assertThat( + DataStream.isFailureStoreEffectivelyEnabled( + DataStreamOptions.FAILURE_STORE_ENABLED, + nonMatchingSettings, + regularDataStreamName, + systemIndices + ), + is(true) + ); + } + private record DataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis, Long originationTimeInMillis) { public static DataStreamMetadata dataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis) { return new DataStreamMetadata(creationTimeInMillis, rolloverTimeInMillis, null); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index b7f33151961ea..7a07e407024ce 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -93,6 +93,7 @@ import org.elasticsearch.cluster.coordination.LeaderHeartbeatService; import org.elasticsearch.cluster.coordination.Reconfigurator; import org.elasticsearch.cluster.coordination.StatefulPreVoteCollector; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -2408,7 +2409,8 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { indexNameExpressionResolver, new IndexingPressure(settings), EmptySystemIndices.INSTANCE, - FailureStoreMetrics.NOOP + FailureStoreMetrics.NOOP, + DataStreamFailureStoreSettings.create(ClusterSettings.createBuiltInClusterSettings()) ) ); final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction( diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 06e4e486e78b5..c3ce32d4ce333 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ 
b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -146,13 +146,35 @@ public static DataStream newInstance( boolean replicated, @Nullable DataStreamLifecycle lifecycle, List failureStores + ) { + return newInstance( + name, + indices, + generation, + metadata, + replicated, + lifecycle, + failureStores, + failureStores.isEmpty() ? DataStreamOptions.EMPTY : DataStreamOptions.FAILURE_STORE_ENABLED + ); + } + + public static DataStream newInstance( + String name, + List indices, + long generation, + Map metadata, + boolean replicated, + DataStreamLifecycle lifecycle, + List failureStores, + DataStreamOptions dataStreamOptions ) { return DataStream.builder(name, indices) .setGeneration(generation) .setMetadata(metadata) .setReplicated(replicated) .setLifecycle(lifecycle) - .setDataStreamOptions(failureStores.isEmpty() ? DataStreamOptions.EMPTY : DataStreamOptions.FAILURE_STORE_ENABLED) + .setDataStreamOptions(dataStreamOptions) .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()) .build(); } @@ -457,7 +479,7 @@ public static void getClusterStateWithDataStreams( Settings settings, int replicas, boolean replicated, - boolean storeFailures + Boolean storeFailures ) { builder.put( "template_1", @@ -466,7 +488,7 @@ public static void getClusterStateWithDataStreams( .template( Template.builder() .dataStreamOptions( - DataStream.isFailureStoreFeatureFlagEnabled() && storeFailures ? createDataStreamOptionsTemplate(true) : null + DataStream.isFailureStoreFeatureFlagEnabled() ? createDataStreamOptionsTemplate(storeFailures) : null ) ) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) @@ -484,7 +506,7 @@ public static void getClusterStateWithDataStreams( allIndices.addAll(backingIndices); List failureStores = new ArrayList<>(); - if (DataStream.isFailureStoreFeatureFlagEnabled() && storeFailures) { + if (DataStream.isFailureStoreFeatureFlagEnabled() && Boolean.TRUE.equals(storeFailures)) { for (int failureStoreNumber = 1; failureStoreNumber <= dsTuple.v2(); failureStoreNumber++) { failureStores.add( createIndexMetadata( diff --git a/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java b/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java index 0b6e187930c01..0fb3144e12040 100644 --- a/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java +++ b/x-pack/plugin/core/src/javaRestTest/java/org/elasticsearch/xpack/core/DataStreamRestIT.java @@ -9,6 +9,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -51,7 +52,8 @@ public void testDSXpackUsage() throws Exception { assertThat(dataStreams.get("data_streams"), equalTo(0)); assertThat(dataStreams, hasKey("failure_store")); Map failureStoreStats = (Map) dataStreams.get("failure_store"); - assertThat(failureStoreStats.get("enabled_count"), equalTo(0)); + assertThat(failureStoreStats.get("explicitly_enabled_count"), equalTo(0)); + assertThat(failureStoreStats.get("effectively_enabled_count"), equalTo(0)); assertThat(failureStoreStats.get("failure_indices_count"), equalTo(0)); assertBusy(() -> { Map logsTemplate = (Map) ((List) 
getLocation("/_index_template/logs").get("index_templates")).get(0); @@ -85,8 +87,21 @@ public void testDSXpackUsage() throws Exception { assertThat("got: " + dataStreams, dataStreams.get("data_streams"), equalTo(2)); assertThat("got: " + dataStreams, dataStreams.get("indices_count"), equalTo(3)); failureStoreStats = (Map) dataStreams.get("failure_store"); - assertThat(failureStoreStats.get("enabled_count"), equalTo(1)); + assertThat(failureStoreStats.get("explicitly_enabled_count"), equalTo(1)); + assertThat(failureStoreStats.get("effectively_enabled_count"), equalTo(1)); assertThat(failureStoreStats.get("failure_indices_count"), equalTo(1)); + + // Enable the failure store for logs-mysql-default using the cluster setting... + updateClusterSettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "logs-mysql-default") + .build() + ); + // ...and assert that it counts towards effectively_enabled_count but not explicitly_enabled_count: + dataStreams = (Map) getLocation("/_xpack/usage").get("data_streams"); + failureStoreStats = (Map) dataStreams.get("failure_store"); + assertThat(failureStoreStats.get("explicitly_enabled_count"), equalTo(1)); + assertThat(failureStoreStats.get("effectively_enabled_count"), equalTo(2)); } Map getLocation(String path) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamUsageTransportAction.java index 26f3bdd7654ca..7eece9177cf2b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamUsageTransportAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.injection.guice.Inject; @@ -24,13 +25,16 @@ public class DataStreamUsageTransportAction extends XPackUsageFeatureTransportAction { + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; + @Inject public DataStreamUsageTransportAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { super( XPackUsageFeatureAction.DATA_STREAMS.name(), @@ -40,6 +44,7 @@ public DataStreamUsageTransportAction( actionFilters, indexNameExpressionResolver ); + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; } @Override @@ -51,13 +56,17 @@ protected void masterOperation( ) { final Map dataStreams = state.metadata().dataStreams(); long backingIndicesCounter = 0; - long failureStoreEnabledCounter = 0; + long failureStoreExplicitlyEnabledCounter = 0; + long failureStoreEffectivelyEnabledCounter = 0; long failureIndicesCounter = 0; for (DataStream ds : dataStreams.values()) { backingIndicesCounter += ds.getIndices().size(); if (DataStream.isFailureStoreFeatureFlagEnabled()) { - if (ds.isFailureStoreEnabled()) { - 
failureStoreEnabledCounter++; + if (ds.isFailureStoreExplicitlyEnabled()) { + failureStoreExplicitlyEnabledCounter++; + } + if (ds.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings)) { + failureStoreEffectivelyEnabledCounter++; } if (ds.getFailureIndices().getIndices().isEmpty() == false) { failureIndicesCounter += ds.getFailureIndices().getIndices().size(); @@ -67,7 +76,8 @@ protected void masterOperation( final DataStreamFeatureSetUsage.DataStreamStats stats = new DataStreamFeatureSetUsage.DataStreamStats( dataStreams.size(), backingIndicesCounter, - failureStoreEnabledCounter, + failureStoreExplicitlyEnabledCounter, + failureStoreEffectivelyEnabledCounter, failureIndicesCounter ); final DataStreamFeatureSetUsage usage = new DataStreamFeatureSetUsage(stats); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java index 1a964f3c57dbb..f8cb9b913b4ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java @@ -52,7 +52,8 @@ protected void innerXContent(XContentBuilder builder, Params params) throws IOEx builder.field("indices_count", streamStats.indicesBehindDataStream); if (DataStream.isFailureStoreFeatureFlagEnabled()) { builder.startObject("failure_store"); - builder.field("enabled_count", streamStats.failureStoreEnabledDataStreamCount); + builder.field("explicitly_enabled_count", streamStats.failureStoreExplicitlyEnabledDataStreamCount); + builder.field("effectively_enabled_count", streamStats.failureStoreEffectivelyEnabledDataStreamCount); builder.field("failure_indices_count", streamStats.failureStoreIndicesCount); builder.endObject(); } @@ -83,7 +84,8 @@ public boolean equals(Object obj) { public record DataStreamStats( long totalDataStreamCount, long indicesBehindDataStream, - long failureStoreEnabledDataStreamCount, + long failureStoreExplicitlyEnabledDataStreamCount, + long failureStoreEffectivelyEnabledDataStreamCount, long failureStoreIndicesCount ) implements Writeable { @@ -92,6 +94,7 @@ public DataStreamStats(StreamInput in) throws IOException { in.readVLong(), in.readVLong(), in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readVLong() : 0, + in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING) ? in.readVLong() : 0, in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? 
in.readVLong() : 0 ); } @@ -101,7 +104,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(this.totalDataStreamCount); out.writeVLong(this.indicesBehindDataStream); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { - out.writeVLong(this.failureStoreEnabledDataStreamCount); + out.writeVLong(this.failureStoreExplicitlyEnabledDataStreamCount); + if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING)) { + out.writeVLong(failureStoreEffectivelyEnabledDataStreamCount); + } out.writeVLong(this.failureStoreIndicesCount); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/searchablesnapshots/DataStreamFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/searchablesnapshots/DataStreamFeatureSetUsageTests.java index 3ff36c52229e7..5f879d508a52b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/searchablesnapshots/DataStreamFeatureSetUsageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/searchablesnapshots/DataStreamFeatureSetUsageTests.java @@ -20,6 +20,7 @@ protected DataStreamFeatureSetUsage createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong() ) ); From a2e035eeccf2ff9db9d790d8ae3a20682e519cb9 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Thu, 19 Dec 2024 09:56:37 -0700 Subject: [PATCH 57/62] Consolidate randomIncludeCCSMetadata to a single method (#119037) --- .../AbstractEnrichBasedCrossClusterTestCase.java | 10 ---------- .../esql/action/AbstractEsqlIntegTestCase.java | 15 +++++++++++++++ .../esql/action/CrossClusterAsyncQueryIT.java | 15 +-------------- .../CrossClusterEnrichUnavailableClustersIT.java | 13 +++++++------ .../CrossClusterQueryUnavailableRemotesIT.java | 15 +-------------- .../xpack/esql/action/CrossClustersEnrichIT.java | 1 + .../CrossClustersQueriesWithInvalidLicenseIT.java | 1 + .../xpack/esql/action/CrossClustersQueryIT.java | 15 +-------------- 8 files changed, 27 insertions(+), 58 deletions(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java index 66ac32b33cd4d..4d1f908702152 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java @@ -14,7 +14,6 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.core.Tuple; import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.license.LicenseService; @@ -244,15 +243,6 @@ protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } - public static Tuple randomIncludeCCSMetadata() { - return switch (randomIntBetween(1, 3)) { - case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); - case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); - case 3 -> new 
Tuple<>(null, Boolean.FALSE); - default -> throw new AssertionError("should not get here"); - }; - } - public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { super(settings, configPath); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java index 7ae45497f7297..90bf34b499390 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -233,4 +234,18 @@ protected static void assertValuesInAnyOrder(Iterator> actualVa } assertThat(getValuesList(actualValues), containsInAnyOrder(items.toArray())); } + + /** + * v1: value to send to runQuery (can be null; null means use default value) + * v2: whether to expect CCS Metadata in the response (cannot be null) + * @return + */ + public static Tuple randomIncludeCCSMetadata() { + return switch (randomIntBetween(1, 3)) { + case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); + case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); + case 3 -> new Tuple<>(null, Boolean.FALSE); + default -> throw new AssertionError("should not get here"); + }; + } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java index 3926ea4c27a3d..79ac8816a0039 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java @@ -43,6 +43,7 @@ import static org.elasticsearch.core.TimeValue.timeValueMillis; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomIncludeCCSMetadata; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -355,20 +356,6 @@ private static void assertClusterMetadataInResponse(EsqlQueryResponse resp, bool } } - /** - * v1: value to send to runQuery (can be null; null means use default value) - * v2: whether to expect CCS Metadata in the response (cannot be null) - * @return - */ - public static Tuple randomIncludeCCSMetadata() { - return switch (randomIntBetween(1, 3)) { - case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); - case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); - case 3 -> new Tuple<>(null, Boolean.FALSE); - default -> throw new AssertionError("should not get here"); - }; - } - Map setupClusters(int numClusters) throws IOException { assert numClusters == 2 || numClusters == 3 : "2 or 3 clusters supported not: 
" + numClusters; int numShardsLocal = randomIntBetween(1, 5); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java index 09ad97b08f357..9c0447affc754 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java @@ -23,6 +23,7 @@ import java.util.Set; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomIncludeCCSMetadata; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -60,7 +61,7 @@ public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableTr // close remote-cluster-1 so that it is unavailable cluster(REMOTE_CLUSTER_1).close(); - Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -142,7 +143,7 @@ public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableFa // close remote-cluster-1 so that it is unavailable cluster(REMOTE_CLUSTER_1).close(); - Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -188,7 +189,7 @@ public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableFa } public void testEnrichTwiceThenAggsWithUnavailableRemotes() throws IOException { - Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -292,7 +293,7 @@ public void testEnrichTwiceThenAggsWithUnavailableRemotes() throws IOException { } public void testEnrichCoordinatorThenAnyWithSingleUnavailableRemoteAndLocal() throws IOException { - Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -345,7 +346,7 @@ public void testEnrichCoordinatorThenAnyWithSingleUnavailableRemoteAndLocal() th } public void testEnrichCoordinatorThenAnyWithSingleUnavailableRemoteAndNotLocal() throws IOException { - Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -394,7 +395,7 @@ public void testEnrichCoordinatorThenAnyWithSingleUnavailableRemoteAndNotLocal() } public void testEnrichRemoteWithVendor() throws IOException { - Tuple includeCCSMetadata = CrossClustersEnrichIT.randomIncludeCCSMetadata(); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); 
boolean responseExpectMeta = includeCCSMetadata.v2(); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java index f65764daafb8a..eb728895cd00c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java @@ -30,6 +30,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomIncludeCCSMetadata; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; @@ -452,20 +453,6 @@ protected EsqlQueryResponse runQuery(EsqlQueryRequest request) { return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } - /** - * v1: value to send to runQuery (can be null; null means use default value) - * v2: whether to expect CCS Metadata in the response (cannot be null) - * @return - */ - public static Tuple randomIncludeCCSMetadata() { - return switch (randomIntBetween(1, 3)) { - case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); - case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); - case 3 -> new Tuple<>(null, Boolean.FALSE); - default -> throw new AssertionError("should not get here"); - }; - } - Map setupClusters(int numClusters) { assert numClusters == 2 || numClusters == 3 : "2 or 3 clusters supported not: " + numClusters; String localIndex = "logs-1"; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index 4e6be6cc2bf74..51ad4a0d2053f 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -22,6 +22,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomIncludeCCSMetadata; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java index 1ed42b696d65e..a9c8190976a02 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java @@ -20,6 +20,7 @@ import java.util.Set; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static 
org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomIncludeCCSMetadata; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index 347ef419cab9b..6cfc42523007e 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -48,6 +48,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomIncludeCCSMetadata; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1251,20 +1252,6 @@ protected EsqlQueryResponse runQuery(EsqlQueryRequest request) { return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } - /** - * v1: value to send to runQuery (can be null; null means use default value) - * v2: whether to expect CCS Metadata in the response (cannot be null) - * @return - */ - public static Tuple randomIncludeCCSMetadata() { - return switch (randomIntBetween(1, 3)) { - case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); - case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); - case 3 -> new Tuple<>(null, Boolean.FALSE); - default -> throw new AssertionError("should not get here"); - }; - } - void waitForNoInitializingShards(Client client, TimeValue timeout, String... indices) { ClusterHealthResponse resp = client.admin() .cluster() From 4007ff3b44269d2f48e5aa938cf621fbd0123df9 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Thu, 19 Dec 2024 17:58:06 +0100 Subject: [PATCH 58/62] ESQL: allow DATE_PARSE to read the timezones (#118603) This removes the hard-coded UTC timezone that was previously applied to the formatter, allowing `DATE_PARSE` to correctly read timezones from its input. Fixes #117680.
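To illustrate the behavior this patch enables: once the formatter is no longer pinned to UTC, an explicit offset in the input string shifts the parsed instant. The sketch below uses Elasticsearch's DateFormatter directly rather than the DATE_PARSE evaluator; the parseMillis call and the class scaffolding are illustrative, not code from the patch.

    import org.elasticsearch.common.time.DateFormatter;
    import java.time.ZoneOffset;

    public class DateParseTimezoneSketch {
        public static void main(String[] args) {
            String input = "12/Jul/2022:10:24:10 +0900";
            // Pre-patch, the DATE_PARSE evaluator built its formatter with .withZone(UTC),
            // which kept the +0900 offset in the input from taking effect.
            DateFormatter pinnedToUtc = DateFormatter.forPattern("dd/MMM/yyyy:HH:mm:ss Z").withZone(ZoneOffset.UTC);
            // Post-patch, no zone is forced onto the formatter, so the offset is honored:
            DateFormatter honorsOffset = DateFormatter.forPattern("dd/MMM/yyyy:HH:mm:ss Z");
            System.out.println(pinnedToUtc.parseMillis(input));   // pre-fix behavior: offset not applied
            System.out.println(honorsOffset.parseMillis(input));  // 1657589050000 == 2022-07-12T01:24:10Z
        }
    }

The expected value matches the evalDateParseWithTimezone case added to date.csv-spec below: 10:24:10 at +0900 is 01:24:10 UTC.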
--- docs/changelog/118603.yaml | 6 +++ .../src/main/resources/date.csv-spec | 54 +++++++++++++++++++ .../scalar/date/DateParseEvaluator.java | 21 +++----- .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../function/scalar/date/DateParse.java | 15 +++--- .../function/scalar/date/DateParseTests.java | 28 +++++++--- 6 files changed, 100 insertions(+), 29 deletions(-) create mode 100644 docs/changelog/118603.yaml diff --git a/docs/changelog/118603.yaml b/docs/changelog/118603.yaml new file mode 100644 index 0000000000000..d61619adfa5f6 --- /dev/null +++ b/docs/changelog/118603.yaml @@ -0,0 +1,6 @@ +pr: 118603 +summary: Allow DATE_PARSE to read the timezones +area: ES|QL +type: bug +issues: + - 117680 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 734e2ef5e475e..22ff3192cc716 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -494,6 +494,60 @@ b:datetime null ; +evalDateParseWithTimezone +required_capability: date_parse_tz +row s = "12/Jul/2022:10:24:10 +0900" | eval d = date_parse("dd/MMM/yyyy:HH:mm:ss Z", s); + +s:keyword | d:datetime +12/Jul/2022:10:24:10 +0900 | 2022-07-12T01:24:10.000Z +; + +evalDateParseWithTimezoneCrossingDayBoundary +required_capability: date_parse_tz +row s = "12/Jul/2022:08:24:10 +0900" | eval d = date_parse("dd/MMM/yyyy:HH:mm:ss Z", s); + +s:keyword | d:datetime +12/Jul/2022:08:24:10 +0900 | 2022-07-11T23:24:10.000Z +; + +evalDateParseWithTimezone2 +required_capability: date_parse_tz +row s1 = "12/Jul/2022:10:24:10 +0900", s2 = "2022/12/07 09:24:10 +0800" +| eval d1 = date_parse("dd/MMM/yyyy:HH:mm:ss Z", s1), d2 = date_parse("yyyy/dd/MM HH:mm:ss Z", s2) +| eval eq = d1 == d2 +| keep d1, eq +; + +d1:datetime | eq:boolean +2022-07-12T01:24:10.000Z | true +; + +evalDateParseWithAndWithoutTimezone +required_capability: date_parse_tz +row s = "2022/12/07 09:24:10", format="yyyy/dd/MM HH:mm:ss" +| eval no_tz = date_parse(format, s) +| eval with_tz = date_parse(concat(format, " Z"), concat(s, " +0900")) +| keep s, no_tz, with_tz +; + +s:keyword | no_tz:datetime | with_tz:datetime +2022/12/07 09:24:10 | 2022-07-12T09:24:10.000Z | 2022-07-12T00:24:10.000Z +; + +evalDateParseWithOtherTimezoneSpecifiers +required_capability: date_parse_tz +row s = "2022/12/07 09:24:10", format="yyyy/dd/MM HH:mm:ss" +| eval with_tz1 = date_parse(concat(format, " Z"), concat(s, " +0900")) +| eval with_tz2 = date_parse(concat(format, " x"), concat(s, " +09")) +| eval with_tz3 = date_parse(concat(format, " X"), concat(s, " +0900")) +| eval with_tz4 = date_parse(concat(format, " O"), concat(s, " GMT+9")) +| keep s, with_tz* +; + +s:keyword | with_tz1:datetime | with_tz2:datetime | with_tz3:datetime | with_tz4:datetime +2022/12/07 09:24:10 | 2022-07-12T00:24:10.000Z | 2022-07-12T00:24:10.000Z | 2022-07-12T00:24:10.000Z | 2022-07-12T00:24:10.000Z +; + evalDateParseDynamic from employees | where emp_no == 10039 or emp_no == 10040 | sort emp_no | eval birth_date_string = date_format("yyyy-MM-dd", birth_date) diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java index 3ea782931a0a3..6c432855e38fb 100644 --- 
a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java @@ -7,7 +7,6 @@ import java.lang.IllegalArgumentException; import java.lang.Override; import java.lang.String; -import java.time.ZoneId; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; @@ -31,18 +30,15 @@ public final class DateParseEvaluator implements EvalOperator.ExpressionEvaluato private final EvalOperator.ExpressionEvaluator formatter; - private final ZoneId zoneId; - private final DriverContext driverContext; private Warnings warnings; public DateParseEvaluator(Source source, EvalOperator.ExpressionEvaluator val, - EvalOperator.ExpressionEvaluator formatter, ZoneId zoneId, DriverContext driverContext) { + EvalOperator.ExpressionEvaluator formatter, DriverContext driverContext) { this.source = source; this.val = val; this.formatter = formatter; - this.zoneId = zoneId; this.driverContext = driverContext; } @@ -91,7 +87,7 @@ public LongBlock eval(int positionCount, BytesRefBlock valBlock, BytesRefBlock f continue position; } try { - result.appendLong(DateParse.process(valBlock.getBytesRef(valBlock.getFirstValueIndex(p), valScratch), formatterBlock.getBytesRef(formatterBlock.getFirstValueIndex(p), formatterScratch), this.zoneId)); + result.appendLong(DateParse.process(valBlock.getBytesRef(valBlock.getFirstValueIndex(p), valScratch), formatterBlock.getBytesRef(formatterBlock.getFirstValueIndex(p), formatterScratch))); } catch (IllegalArgumentException e) { warnings().registerException(e); result.appendNull(); @@ -108,7 +104,7 @@ public LongBlock eval(int positionCount, BytesRefVector valVector, BytesRef formatterScratch = new BytesRef(); position: for (int p = 0; p < positionCount; p++) { try { - result.appendLong(DateParse.process(valVector.getBytesRef(p, valScratch), formatterVector.getBytesRef(p, formatterScratch), this.zoneId)); + result.appendLong(DateParse.process(valVector.getBytesRef(p, valScratch), formatterVector.getBytesRef(p, formatterScratch))); } catch (IllegalArgumentException e) { warnings().registerException(e); result.appendNull(); @@ -120,7 +116,7 @@ public LongBlock eval(int positionCount, BytesRefVector valVector, @Override public String toString() { - return "DateParseEvaluator[" + "val=" + val + ", formatter=" + formatter + ", zoneId=" + zoneId + "]"; + return "DateParseEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; } @Override @@ -147,24 +143,21 @@ static class Factory implements EvalOperator.ExpressionEvaluator.Factory { private final EvalOperator.ExpressionEvaluator.Factory formatter; - private final ZoneId zoneId; - public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, - EvalOperator.ExpressionEvaluator.Factory formatter, ZoneId zoneId) { + EvalOperator.ExpressionEvaluator.Factory formatter) { this.source = source; this.val = val; this.formatter = formatter; - this.zoneId = zoneId; } @Override public DateParseEvaluator get(DriverContext context) { - return new DateParseEvaluator(source, val.get(context), formatter.get(context), zoneId, context); + return new DateParseEvaluator(source, val.get(context), formatter.get(context), context); } @Override public String toString() { - return "DateParseEvaluator[" + "val=" + val + ", formatter=" + formatter + ", zoneId=" + zoneId + "]"; + return 
"DateParseEvaluator[" + "val=" + val + ", formatter=" + formatter + "]"; } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index a6e0f1d89c364..b1b11ccb09c86 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -380,6 +380,11 @@ public enum Cap { */ DATE_NANOS_AGGREGATIONS(), + /** + * DATE_PARSE supports reading timezones + */ + DATE_PARSE_TZ(), + /** * Support for datetime in least and greatest functions */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java index 1aaa227c3846e..e09fabab98d0f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java @@ -28,14 +28,12 @@ import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; -import java.time.ZoneId; import java.util.List; import static org.elasticsearch.common.time.DateFormatter.forPattern; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; -import static org.elasticsearch.xpack.esql.core.util.DateUtils.UTC; import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.DEFAULT_DATE_TIME_FORMATTER; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToLong; @@ -130,13 +128,12 @@ public static long process(BytesRef val, @Fixed DateFormatter formatter) throws } @Evaluator(warnExceptions = { IllegalArgumentException.class }) - static long process(BytesRef val, BytesRef formatter, @Fixed ZoneId zoneId) throws IllegalArgumentException { - return dateTimeToLong(val.utf8ToString(), toFormatter(formatter, zoneId)); + static long process(BytesRef val, BytesRef formatter) throws IllegalArgumentException { + return dateTimeToLong(val.utf8ToString(), toFormatter(formatter)); } @Override public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { - ZoneId zone = UTC; // TODO session timezone? 
ExpressionEvaluator.Factory fieldEvaluator = toEvaluator.apply(field); if (format == null) { return new DateParseConstantEvaluator.Factory(source(), fieldEvaluator, DEFAULT_DATE_TIME_FORMATTER); @@ -146,18 +143,18 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { } if (format.foldable()) { try { - DateFormatter formatter = toFormatter(format.fold(), zone); + DateFormatter formatter = toFormatter(format.fold()); return new DateParseConstantEvaluator.Factory(source(), fieldEvaluator, formatter); } catch (IllegalArgumentException e) { throw new InvalidArgumentException(e, "invalid date pattern for [{}]: {}", sourceText(), e.getMessage()); } } ExpressionEvaluator.Factory formatEvaluator = toEvaluator.apply(format); - return new DateParseEvaluator.Factory(source(), fieldEvaluator, formatEvaluator, zone); + return new DateParseEvaluator.Factory(source(), fieldEvaluator, formatEvaluator); } - private static DateFormatter toFormatter(Object format, ZoneId zone) { - return forPattern(((BytesRef) format).utf8ToString()).withZone(zone); + private static DateFormatter toFormatter(Object format) { + return forPattern(((BytesRef) format).utf8ToString()); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index 8da01fc1989ba..04683ecb65467 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; +import java.util.Locale; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -46,11 +47,26 @@ public static Iterable parameters() { new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0]]", DataType.DATETIME, equalTo(1683244800000L) ) ), + new TestCaseSupplier("Timezoned Case", List.of(DataType.KEYWORD, DataType.KEYWORD), () -> { + long ts_sec = 1657585450L; // 2022-07-12T00:24:10Z + int hours = randomIntBetween(0, 23); + String date = String.format(Locale.ROOT, "12/Jul/2022:%02d:24:10 +0900", hours); + long expected_ts = (ts_sec + (hours - 9) * 3600L) * 1000L; + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("dd/MMM/yyyy:HH:mm:ss Z"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef(date), DataType.KEYWORD, "second") + ), + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0]]", + DataType.DATETIME, + equalTo(expected_ts) + ); + }), new TestCaseSupplier( "With Text", List.of(DataType.KEYWORD, DataType.TEXT), @@ -59,7 +75,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], 
formatter=Attribute[channel=0]]", DataType.DATETIME, equalTo(1683244800000L) ) @@ -72,7 +88,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0]]", DataType.DATETIME, equalTo(1683244800000L) ) @@ -85,7 +101,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0]]", DataType.DATETIME, equalTo(1683244800000L) ) @@ -98,7 +114,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0]]", DataType.DATETIME, is(nullValue()) ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") @@ -118,7 +134,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedData(new BytesRef("not a date"), DataType.KEYWORD, "second") ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0]]", DataType.DATETIME, is(nullValue()) ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") From 21bcc314c44632c720137307bae77ce8d300d4a4 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 19 Dec 2024 08:59:23 -0800 Subject: [PATCH 59/62] Limit which classes are retransformed (#118786) During entitlements initialization the transformer is added to instrumentation after some classes are already loaded. Currently we end up force loading (though not initializing) all of the classes that we want to transform. This commit simplifies the retransform step to apply only to classes that are already loaded, a list which Instrumentation provides directly.
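Stated as a self-contained sketch, the new approach looks like the following; the class and method names here are illustrative, and only the standard java.lang.instrument API is assumed.

    import java.lang.instrument.Instrumentation;
    import java.lang.instrument.UnmodifiableClassException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    final class RetransformSketch {
        // Retransform only classes the JVM has already loaded, instead of force-loading
        // every class named in the transform set via Class.forName.
        static void retransformAlreadyLoaded(Instrumentation inst, Set<String> internalNames)
            throws UnmodifiableClassException {
            List<Class<?>> retransform = new ArrayList<>();
            for (Class<?> loaded : inst.getAllLoadedClasses()) {
                // The transform set holds JVM internal names (slash-separated), while
                // Class.getName() returns dot-separated names, hence the conversion.
                if (internalNames.contains(loaded.getName().replace('.', '/'))) {
                    retransform.add(loaded);
                }
            }
            if (retransform.isEmpty() == false) {
                inst.retransformClasses(retransform.toArray(new Class<?>[0]));
            }
        }
    }

Classes that load after this point are rewritten by the already-registered transformer as they load, so they never need retransforming.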
--- .../EntitlementInitialization.java | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index 8e4cddc4d63ee..c2ee935e0e5f3 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -34,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -72,17 +73,17 @@ public static void initialize(Instrumentation inst) throws Exception { Instrumenter instrumenter = INSTRUMENTER_FACTORY.newInstrumenter(EntitlementChecker.class, checkMethods); inst.addTransformer(new Transformer(instrumenter, classesToTransform), true); - // TODO: should we limit this array somehow? - var classesToRetransform = classesToTransform.stream().map(EntitlementInitialization::internalNameToClass).toArray(Class[]::new); - inst.retransformClasses(classesToRetransform); + inst.retransformClasses(findClassesToRetransform(inst.getAllLoadedClasses(), classesToTransform)); } - private static Class internalNameToClass(String internalName) { - try { - return Class.forName(internalName.replace('/', '.'), false, ClassLoader.getPlatformClassLoader()); - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); + private static Class[] findClassesToRetransform(Class[] loadedClasses, Set classesToTransform) { + List> retransform = new ArrayList<>(); + for (Class loadedClass : loadedClasses) { + if (classesToTransform.contains(loadedClass.getName().replace(".", "/"))) { + retransform.add(loadedClass); + } } + return retransform.toArray(new Class[0]); } private static PolicyManager createPolicyManager() throws IOException { From 49ebd42408ccbf0e5212ee52a678778309dbfac6 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 18:05:29 +0100 Subject: [PATCH 60/62] Lower default mappings parsing compatibility to MINIMUM_READONLY_COMPATIBLE (#119017) When parsing the mappings of a legacy index, we check, for every field type found, the minimum compatibility version that its type parser supports. Only some of the basic field types are supported; otherwise, a placeholder mapper is created in place of the real field. The minimum supported version is v5 for the supported field mappers; for all the others, we can lower it from MINIMUM_COMPATIBLE to MINIMUM_READONLY_COMPATIBLE. This commit also centralizes the creation of type parsers that declare support for archive indices, as they all declare the same version.
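As a usage sketch, a mapper that opts into legacy (archive) index support now only needs the shared helper; the Builder constructor below is hypothetical, standing in for whichever builder the concrete mapper defines.

    // Inside a hypothetical FieldMapper subclass. The helper supplies
    // IndexVersion.fromId(5000099) as the minimum compatibility version, so the
    // mapper no longer declares that constant itself.
    public static final TypeParser PARSER = createTypeParserWithLegacySupport(
        (n, c) -> new Builder(n, c.indexVersionCreated())
    );

Mappers that do not use the helper keep the new default minimum of IndexVersions.MINIMUM_READONLY_COMPATIBLE, and on older legacy indices MapperRegistry falls back to PlaceHolderFieldMapper.PARSER for them.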
--- .../index/mapper/BooleanFieldMapper.java | 7 ++----- .../index/mapper/DateFieldMapper.java | 10 ++++------ .../elasticsearch/index/mapper/FieldMapper.java | 16 +++++++++++----- .../index/mapper/GeoPointFieldMapper.java | 7 ++----- .../index/mapper/IpFieldMapper.java | 6 ++---- .../index/mapper/KeywordFieldMapper.java | 4 +--- .../org/elasticsearch/index/mapper/Mapper.java | 2 +- .../index/mapper/MapperRegistry.java | 8 ++++++-- .../index/mapper/NumberFieldMapper.java | 14 ++------------ .../index/mapper/TextFieldMapper.java | 7 ++----- 10 files changed, 33 insertions(+), 48 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index f74d58093a7f5..a0e06bafb4050 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -182,11 +182,8 @@ private FieldValues scriptValues() { } } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - - public static final TypeParser PARSER = new TypeParser( - (n, c) -> new Builder(n, c.scriptCompiler(), IGNORE_MALFORMED_SETTING.get(c.getSettings()), c.indexVersionCreated()), - MINIMUM_COMPATIBILITY_VERSION + public static final TypeParser PARSER = createTypeParserWithLegacySupport( + (n, c) -> new Builder(n, c.scriptCompiler(), IGNORE_MALFORMED_SETTING.get(c.getSettings()), c.indexVersionCreated()) ); public static final class BooleanFieldType extends TermBasedFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 39744cbd39f7c..fb17f1c7ebaf8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -398,9 +398,7 @@ public DateFieldMapper build(MapperBuilderContext context) { } } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - - public static final TypeParser MILLIS_PARSER = new TypeParser((n, c) -> { + public static final TypeParser MILLIS_PARSER = createTypeParserWithLegacySupport((n, c) -> { boolean ignoreMalformedByDefault = IGNORE_MALFORMED_SETTING.get(c.getSettings()); return new Builder( n, @@ -410,9 +408,9 @@ public DateFieldMapper build(MapperBuilderContext context) { ignoreMalformedByDefault, c.indexVersionCreated() ); - }, MINIMUM_COMPATIBILITY_VERSION); + }); - public static final TypeParser NANOS_PARSER = new TypeParser((n, c) -> { + public static final TypeParser NANOS_PARSER = createTypeParserWithLegacySupport((n, c) -> { boolean ignoreMalformedByDefault = IGNORE_MALFORMED_SETTING.get(c.getSettings()); return new Builder( n, @@ -422,7 +420,7 @@ public DateFieldMapper build(MapperBuilderContext context) { ignoreMalformedByDefault, c.indexVersionCreated() ); - }, MINIMUM_COMPATIBILITY_VERSION); + }); public static final class DateFieldType extends MappedFieldType { final DateFormatter dateTimeFormatter; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 7238127571fed..ffd60efc772f9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -1618,6 +1618,12 @@ public static 
BiConsumer notFromDynamicTemplates(S }; } + private static final IndexVersion MINIMUM_LEGACY_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); + + public static TypeParser createTypeParserWithLegacySupport(BiFunction builderFunction) { + return new TypeParser(builderFunction, MINIMUM_LEGACY_COMPATIBILITY_VERSION); + } + /** * TypeParser implementation that automatically handles parsing */ @@ -1632,14 +1638,14 @@ public static final class TypeParser implements Mapper.TypeParser { * @param builderFunction a function that produces a Builder from a name and parsercontext */ public TypeParser(BiFunction builderFunction) { - this(builderFunction, (n, c) -> {}, IndexVersions.MINIMUM_COMPATIBLE); + this(builderFunction, (n, c) -> {}, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } /** - * Variant of {@link #TypeParser(BiFunction)} that allows to defining a minimumCompatibilityVersion to + * Variant of {@link #TypeParser(BiFunction)} that allows to define a minimumCompatibilityVersion to * allow parsing mapping definitions of legacy indices (see {@link Mapper.TypeParser#supportsVersion(IndexVersion)}). */ - public TypeParser(BiFunction builderFunction, IndexVersion minimumCompatibilityVersion) { + private TypeParser(BiFunction builderFunction, IndexVersion minimumCompatibilityVersion) { this(builderFunction, (n, c) -> {}, minimumCompatibilityVersion); } @@ -1647,14 +1653,14 @@ public TypeParser( BiFunction builderFunction, BiConsumer contextValidator ) { - this(builderFunction, contextValidator, IndexVersions.MINIMUM_COMPATIBLE); + this(builderFunction, contextValidator, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } public TypeParser( BiFunction builderFunction, List> contextValidator ) { - this(builderFunction, (n, c) -> contextValidator.forEach(v -> v.accept(n, c)), IndexVersions.MINIMUM_COMPATIBLE); + this(builderFunction, (n, c) -> contextValidator.forEach(v -> v.accept(n, c)), IndexVersions.MINIMUM_READONLY_COMPATIBLE); } private TypeParser( diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 161e1ba84aa40..0824e5381ff65 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -228,17 +228,14 @@ public FieldMapper build(MapperBuilderContext context) { } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - - public static TypeParser PARSER = new TypeParser( + public static TypeParser PARSER = createTypeParserWithLegacySupport( (n, c) -> new Builder( n, c.scriptCompiler(), IGNORE_MALFORMED_SETTING.get(c.getSettings()), c.indexVersionCreated(), c.getIndexSettings().getMode() - ), - MINIMUM_COMPATIBILITY_VERSION + ) ); private final Builder builder; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 2f64955b48627..0204a2b10013a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -204,12 +204,10 @@ public IpFieldMapper build(MapperBuilderContext context) { } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - - public static final TypeParser PARSER = new TypeParser((n, c) -> { + public static final TypeParser PARSER = 
createTypeParserWithLegacySupport((n, c) -> { boolean ignoreMalformedByDefault = IGNORE_MALFORMED_SETTING.get(c.getSettings()); return new Builder(n, c.scriptCompiler(), ignoreMalformedByDefault, c.indexVersionCreated()); - }, MINIMUM_COMPATIBILITY_VERSION); + }); public static final class IpFieldType extends SimpleMappedFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 32aa422b18bcc..e970789225059 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -385,9 +385,7 @@ public KeywordFieldMapper build(MapperBuilderContext context) { } } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - - public static final TypeParser PARSER = new TypeParser(Builder::new, MINIMUM_COMPATIBILITY_VERSION); + public static final TypeParser PARSER = createTypeParserWithLegacySupport(Builder::new); public static final class KeywordFieldType extends StringFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 6bc63bdbcceaf..5cbdffc28ba74 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -131,7 +131,7 @@ public interface TypeParser { * Whether we can parse this type on indices with the given index created version. */ default boolean supportsVersion(IndexVersion indexCreatedVersion) { - return indexCreatedVersion.onOrAfter(IndexVersions.MINIMUM_COMPATIBLE); + return indexCreatedVersion.onOrAfter(IndexVersions.MINIMUM_READONLY_COMPATIBLE); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java index ea94576f5c536..44f7def74ec0e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java @@ -57,9 +57,13 @@ public MapperRegistry( */ public Mapper.TypeParser getMapperParser(String type, IndexVersion indexVersionCreated) { Mapper.TypeParser parser = mapperParsers.get(type); - if (indexVersionCreated.isLegacyIndexVersion() && (parser == null || parser.supportsVersion(indexVersionCreated) == false)) { - return PlaceHolderFieldMapper.PARSER.apply(type); + if (indexVersionCreated.isLegacyIndexVersion()) { + if (parser == null || parser.supportsVersion(indexVersionCreated) == false) { + return PlaceHolderFieldMapper.PARSER.apply(type); + } + return parser; } else { + assert parser == null || parser.supportsVersion(indexVersionCreated); return parser; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 8c21dfea31b9a..1b8fb3dfc101c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -89,8 +89,6 @@ private static NumberFieldMapper toType(FieldMapper in) { return (NumberFieldMapper) in; } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - public static final class Builder extends FieldMapper.DimensionBuilder { private final Parameter 
indexed; @@ -1378,16 +1376,8 @@ private boolean isOutOfRange(Object value) { NumberType(String name, NumericType numericType) { this.name = name; this.numericType = numericType; - this.parser = new TypeParser( - (n, c) -> new Builder( - n, - this, - c.scriptCompiler(), - c.getSettings(), - c.indexVersionCreated(), - c.getIndexSettings().getMode() - ), - MINIMUM_COMPATIBILITY_VERSION + this.parser = createTypeParserWithLegacySupport( + (n, c) -> new Builder(n, this, c.scriptCompiler(), c.getSettings(), c.indexVersionCreated(), c.getIndexSettings().getMode()) ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index cf75f1ddf3b94..b7323f2c0e3e4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -480,11 +480,8 @@ public TextFieldMapper build(MapperBuilderContext context) { } } - private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); - - public static final TypeParser PARSER = new TypeParser( - (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers(), SourceFieldMapper.isSynthetic(c.getIndexSettings())), - MINIMUM_COMPATIBILITY_VERSION + public static final TypeParser PARSER = createTypeParserWithLegacySupport( + (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers(), SourceFieldMapper.isSynthetic(c.getIndexSettings())) ); private static class PhraseWrappedAnalyzer extends AnalyzerWrapper { From 0a7d50dc0ebbd40bc4ea3726aacd0314ff87958e Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 19 Dec 2024 18:05:42 +0100 Subject: [PATCH 61/62] Remove version field from CachedBlob (#119047) --- .../xpack/searchablesnapshots/cache/blob/CachedBlob.java | 6 ------ 1 file changed, 6 deletions(-) diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/CachedBlob.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/CachedBlob.java index a434ac39f59e9..bacd0c750f525 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/CachedBlob.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/CachedBlob.java @@ -7,10 +7,8 @@ package org.elasticsearch.xpack.searchablesnapshots.cache.blob; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -60,14 +58,10 @@ private CachedBlob(Instant creationTime, String repository, String name, String @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) - // we can remove the version field when we no longer need to keep compatibility with <8.12 - final int version = Version.CURRENT.id; builder.startObject(); { builder.field("type", TYPE); builder.field(CREATION_TIME_FIELD, creationTime.toEpochMilli()); - builder.field("version", version); builder.field("repository", repository); builder.startObject("blob"); { From 6983f9ab5a0cb5dd7300a95bdb31493889999193 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 
From 6983f9ab5a0cb5dd7300a95bdb31493889999193 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Thu, 19 Dec 2024 18:06:00 +0100
Subject: [PATCH 62/62] Address shape query builders testToQuery failures (#119096)

With the broadening of index versions tested in AbstractQueryBuilder, we need
to restore compatibility with 7.x index versions that had been removed.

Closes #119090
Closes #119091
---
 muted-tests.yml                             |  6 -----
 .../GeoShapeQueryBuilderGeoShapeTests.java  | 25 ++++++++++++++-----
 .../ShapeQueryBuilderOverShapeTests.java    | 15 ++++++++---
 3 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 34b8f1bd18d52..9f1550b1cb5cc 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -302,12 +302,6 @@ tests:
   issue: https://github.com/elastic/elasticsearch/issues/118414
 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlClientYamlIT
   issue: https://github.com/elastic/elasticsearch/issues/119086
-- class: org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilderOverShapeTests
-  method: testToQuery
-  issue: https://github.com/elastic/elasticsearch/issues/119090
-- class: org.elasticsearch.xpack.spatial.index.query.GeoShapeQueryBuilderGeoShapeTests
-  method: testToQuery
-  issue: https://github.com/elastic/elasticsearch/issues/119091
 
 # Examples:
 #
diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java
index 405ef5c480687..e5212a804e057 100644
--- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java
+++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.geo.GeometryTestUtils;
 import org.elasticsearch.geometry.Geometry;
 import org.elasticsearch.geometry.ShapeType;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.GeoShapeQueryBuilder;
 import org.elasticsearch.index.query.SearchExecutionContext;
@@ -87,15 +88,27 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) {
         }
         if (ESTestCase.randomBoolean()) {
             SearchExecutionContext context = AbstractBuilderTestCase.createSearchExecutionContext();
-            if (shapeType == ShapeType.LINESTRING || shapeType == ShapeType.MULTILINESTRING) {
-                builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS));
+            if (context.indexVersionCreated().onOrAfter(IndexVersions.V_7_5_0)) { // CONTAINS is only supported from version 7.5
+                if (shapeType == ShapeType.LINESTRING || shapeType == ShapeType.MULTILINESTRING) {
+                    builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS));
+                } else {
+                    builder.relation(
+                        ESTestCase.randomFrom(
+                            ShapeRelation.DISJOINT,
+                            ShapeRelation.INTERSECTS,
+                            ShapeRelation.WITHIN,
+                            ShapeRelation.CONTAINS
+                        )
+                    );
+                }
             } else {
-                builder.relation(
-                    ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS)
-                );
+                if (shapeType == ShapeType.LINESTRING || shapeType == ShapeType.MULTILINESTRING) {
+                    builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS));
+                } else {
+                    builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN));
+                }
             }
         }
-
         if (ESTestCase.randomBoolean()) {
             builder.ignoreUnmapped(ESTestCase.randomBoolean());
         }
diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java
index 72073a6eff550..aa5ae72df2b9e 100644
--- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java
+++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.geo.ShapeTestUtils;
 import org.elasticsearch.geometry.Geometry;
 import org.elasticsearch.geometry.ShapeType;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.SearchExecutionContext;
 
@@ -32,10 +33,18 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
 
     @Override
     protected ShapeRelation getShapeRelation(ShapeType type) {
         SearchExecutionContext context = createSearchExecutionContext();
-        if (type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING) {
-            return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS);
+        if (context.indexVersionCreated().onOrAfter(IndexVersions.V_7_5_0)) { // CONTAINS is only supported from version 7.5
+            if (type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING) {
+                return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS);
+            } else {
+                return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS);
+            }
         } else {
-            return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS);
+            if (type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING) {
+                return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS);
+            } else {
+                return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN);
+            }
         }
     }
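
Both test fixes above duplicate the same four-way branching, which reads as one table: CONTAINS needs an index created on or after 7.5, and WITHIN is never offered for lineal shapes. A sketch of a shared helper expressing exactly that logic follows; it is purely illustrative, as neither test was refactored this way in the patch (imports assumed: java.util.ArrayList, java.util.List, plus the IndexVersion, IndexVersions, ShapeRelation, ShapeType, and ESTestCase types already used above):

    // Mirrors the branching in both tests: relations valid for the given
    // index version and shape type. The helper itself is hypothetical.
    static ShapeRelation randomSupportedRelation(IndexVersion indexVersionCreated, ShapeType type) {
        boolean lineal = type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING;
        List<ShapeRelation> relations = new ArrayList<>(List.of(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS));
        if (lineal == false) {
            relations.add(ShapeRelation.WITHIN); // WITHIN is not supported for lineal geometries
        }
        if (indexVersionCreated.onOrAfter(IndexVersions.V_7_5_0)) {
            relations.add(ShapeRelation.CONTAINS); // CONTAINS requires indices created on 7.5+
        }
        return ESTestCase.randomFrom(relations);
    }
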