From 15f30d689bba00b8662ed047b5650af1920bfce5 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 17 Sep 2018 22:33:37 +0200 Subject: [PATCH 01/46] [CCR] Do not unnecessarily wrap fetch exception in an Elasticsearch exception (#33777) * [CCR] Do not unnecessarily wrap fetch exception in an Elasticsearch exception and properly map the fetch_exceptions.exception field as an object. The extra caused_by level is not necessary here: ``` "fetch_exceptions": [ { "from_seq_no": 1, "retries": 106, "exception": { "type": "exception", "reason": "[index1] IndexNotFoundException[no such index]", "caused_by": { "type": "index_not_found_exception", "reason": "no such index", "index_uuid": "_na_", "index": "index1" } } } ], ``` --- .../xpack/ccr/action/ShardFollowNodeTask.java | 3 ++- .../xpack/ccr/action/ShardFollowNodeTaskTests.java | 10 ++++------ .../collector/ccr/CcrStatsMonitoringDocTests.java | 9 ++++++++- .../plugin/core/src/main/resources/monitoring-es.json | 11 ++++++++++- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 6bf880661fc82..7c1cc3eb137bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.Tuple; @@ -246,7 +247,7 @@ private void sendShardChangesRequest(long from, int maxOperationCount, long maxR synchronized (ShardFollowNodeTask.this) { totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); numberOfFailedFetches++; - fetchExceptions.put(from, Tuple.tuple(retryCounter, new ElasticsearchException(e))); + fetchExceptions.put(from, Tuple.tuple(retryCounter, ExceptionsHelper.convertToElastic(e))); } handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter)); }); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index ea4a1c12b45e1..7e813ae4cf67e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -198,12 +198,10 @@ public void testReceiveRetryableError() { final Map.Entry<Long, Tuple<Integer, ElasticsearchException>> entry = status.fetchExceptions().entrySet().iterator().next(); assertThat(entry.getValue().v1(), equalTo(Math.toIntExact(retryCounter.get()))); assertThat(entry.getKey(), equalTo(0L)); - assertThat(entry.getValue().v2(), instanceOf(ElasticsearchException.class)); - assertNotNull(entry.getValue().v2().getCause()); - assertThat(entry.getValue().v2().getCause(), instanceOf(ShardNotFoundException.class)); - final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().v2().getCause(); - assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); -
assertThat(cause.getShardId().getId(), equalTo(0)); + assertThat(entry.getValue().v2(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException shardNotFoundException = (ShardNotFoundException) entry.getValue().v2(); + assertThat(shardNotFoundException.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(shardNotFoundException.getShardId().getId(), equalTo(0)); } retryCounter.incrementAndGet(); }; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java index 9124e1d524595..808a1e2015914 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java @@ -238,10 +238,17 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { } else { // Manual test specific object fields and if not just fail: if (fieldName.equals("fetch_exceptions")) { + assertThat(fieldType, equalTo("nested")); assertThat(((Map) fieldMapping.get("properties")).size(), equalTo(3)); assertThat(XContentMapValues.extractValue("properties.from_seq_no.type", fieldMapping), equalTo("long")); assertThat(XContentMapValues.extractValue("properties.retries.type", fieldMapping), equalTo("integer")); - assertThat(XContentMapValues.extractValue("properties.exception.type", fieldMapping), equalTo("text")); + assertThat(XContentMapValues.extractValue("properties.exception.type", fieldMapping), equalTo("object")); + + Map<?, ?> exceptionFieldMapping = + (Map<?, ?>) XContentMapValues.extractValue("properties.exception.properties", fieldMapping); + assertThat(exceptionFieldMapping.size(), equalTo(2)); + assertThat(XContentMapValues.extractValue("type.type", exceptionFieldMapping), equalTo("keyword")); + assertThat(XContentMapValues.extractValue("reason.type", exceptionFieldMapping), equalTo("text")); } else { fail("unexpected field value type [" + fieldValue.getClass() + "] for field [" + fieldName + "]"); } diff --git a/x-pack/plugin/core/src/main/resources/monitoring-es.json b/x-pack/plugin/core/src/main/resources/monitoring-es.json index 444f15912e62e..8464f495371d3 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-es.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-es.json @@ -983,6 +983,7 @@ "type": "long" }, "fetch_exceptions": { + "type": "nested", "properties": { "from_seq_no": { "type": "long" @@ -991,7 +992,15 @@ "type": "integer" }, "exception": { - "type": "text" + "type": "object", + "properties": { + "type" : { + "type": "keyword" + }, + "reason": { + "type": "text" + } + } } } }, From e686909768b163a0999e98efe7adb3046eeef3df Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 17 Sep 2018 15:10:16 -0700 Subject: [PATCH 02/46] Build: Change test task sanity check to be per project (#33544) This commit changes the sanity check which ensures the test task was properly replaced with randomized testing to have a per-project check, instead of a global one. The previous global check assumed all test tasks within the root project and below should be randomized testing tasks, but that is not the case for a multi-project build in which only one project is an Elasticsearch plugin. While the new check is not able to emit all of the failed replacements in one error message, the efficacy of the check remains.
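As a reading aid before the diff, here is a minimal sketch, written in Java against the public Gradle API, of the per-project check described above. The real plugin below is Groovy; the class name SanityCheckSketch is illustrative, and RandomizedTestingTask is the task type this buildSrc defines:

```java
import com.carrotsearch.gradle.junit4.RandomizedTestingTask; // task type defined in this buildSrc
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;

// Hypothetical Java rendering of the per-project sanity check.
class SanityCheckSketch implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        // Once the task graph is ready, inspect only this project's "test" task,
        // instead of scanning every "test" task in the whole build as the old
        // global check did.
        project.getRootProject().getGradle().getTaskGraph().whenReady(graph -> {
            Task test = project.getTasks().findByName("test");
            if (test != null && !(test instanceof RandomizedTestingTask)) {
                throw new IllegalStateException("Test task was not replaced in project "
                    + project.getPath() + ". Found " + test.getClass());
            }
        });
    }
}
```

Because each project now throws for its own offending task, the first failure aborts the build rather than aggregating every non-conforming task into one message, which is the trade-off the commit message acknowledges.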
--- .../junit4/RandomizedTestingPlugin.groovy | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy index d4c8f89bf50cf..01458f4543dbf 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy @@ -1,7 +1,6 @@ package com.carrotsearch.gradle.junit4 import com.carrotsearch.ant.tasks.junit4.JUnit4 -import org.gradle.api.GradleException import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task @@ -11,12 +10,8 @@ import org.gradle.api.tasks.TaskContainer import org.gradle.api.tasks.TaskProvider import org.gradle.api.tasks.testing.Test -import java.util.concurrent.atomic.AtomicBoolean - class RandomizedTestingPlugin implements Plugin { - static private AtomicBoolean sanityCheckConfigured = new AtomicBoolean(false) - void apply(Project project) { setupSeed(project) replaceTestTask(project.tasks) @@ -27,16 +22,10 @@ class RandomizedTestingPlugin implements Plugin { private static void configureSanityCheck(Project project) { // Check the task graph to confirm tasks were indeed replaced // https://github.com/elastic/elasticsearch/issues/31324 - if (sanityCheckConfigured.getAndSet(true) == false) { - project.rootProject.getGradle().getTaskGraph().whenReady { - List nonConforming = project.getGradle().getTaskGraph().allTasks - .findAll { it.name == "test" } - .findAll { (it instanceof RandomizedTestingTask) == false} - .collect { "${it.path} -> ${it.class}" } - if (nonConforming.isEmpty() == false) { - throw new GradleException("Found the ${nonConforming.size()} `test` tasks:" + - "\n ${nonConforming.join("\n ")}") - } + project.rootProject.getGradle().getTaskGraph().whenReady { + Task test = project.getTasks().findByName("test") + if (test != null && (test instanceof RandomizedTestingTask) == false) { + throw new IllegalStateException("Test task was not replaced in project ${project.path}. Found ${test.getClass()}") } } } From 42e106fb226b767788859a32be86878b2ae10653 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Mon, 17 Sep 2018 17:38:30 -0500 Subject: [PATCH 03/46] HLRC: split indices request converters (#33433) In an effort to encapsulate the different clients, the request converters are being shuffled around. This splits the IndicesClient request converters. 
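Since only package-private converter methods move, the client's public surface is unchanged by this split; the following short usage sketch (host, port, and index name are placeholders, and it assumes a reachable cluster) shows the call path that now delegates to IndicesRequestConverters internally:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class IndicesClientUsageSketch {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // The caller-facing API is untouched by the refactor; internally the
            // request is now built by IndicesRequestConverters.createIndex(...)
            // rather than the monolithic RequestConverters.
            CreateIndexResponse response = client.indices()
                .create(new CreateIndexRequest("my-index"), RequestOptions.DEFAULT);
            System.out.println("acknowledged: " + response.isAcknowledged());
        }
    }
}
```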
--- .../elasticsearch/client/IndicesClient.java | 104 +- .../client/IndicesRequestConverters.java | 403 ++++++++ .../client/RequestConverters.java | 356 ------- .../client/IndicesRequestConvertersTests.java | 893 ++++++++++++++++++ .../client/RequestConvertersTests.java | 819 +--------------- 5 files changed, 1352 insertions(+), 1223 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index c4567e22e0ba1..3811ba783445d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -89,7 +89,7 @@ public final class IndicesClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse delete(DeleteIndexRequest deleteIndexRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, options, + return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, IndicesRequestConverters::deleteIndex, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -102,7 +102,7 @@ public AcknowledgedResponse delete(DeleteIndexRequest deleteIndexRequest, Reques * @param listener the listener to be notified upon request completion */ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, options, + restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, IndicesRequestConverters::deleteIndex, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -116,7 +116,7 @@ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions op * @throws IOException in case there is a problem sending the request or parsing back the response */ public CreateIndexResponse create(CreateIndexRequest createIndexRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex, options, + return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, CreateIndexResponse::fromXContent, emptySet()); } @@ -129,7 +129,7 @@ public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Request * @param listener the listener to be notified upon request completion */ public void createAsync(CreateIndexRequest createIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex, options, + restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, CreateIndexResponse::fromXContent, listener, emptySet()); } @@ -143,7 +143,7 @@ public void createAsync(CreateIndexRequest createIndexRequest, RequestOptions op * @throws IOException in case there is a problem sending the request or parsing back the response */ public 
AcknowledgedResponse putMapping(PutMappingRequest putMappingRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping, options, + return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, IndicesRequestConverters::putMapping, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -157,7 +157,7 @@ public AcknowledgedResponse putMapping(PutMappingRequest putMappingRequest, Requ */ public void putMappingAsync(PutMappingRequest putMappingRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping, options, + restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, IndicesRequestConverters::putMapping, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -171,7 +171,7 @@ public void putMappingAsync(PutMappingRequest putMappingRequest, RequestOptions * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetMappingsResponse getMapping(GetMappingsRequest getMappingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, RequestConverters::getMappings, options, + return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, IndicesRequestConverters::getMappings, options, GetMappingsResponse::fromXContent, emptySet()); } @@ -185,7 +185,7 @@ public GetMappingsResponse getMapping(GetMappingsRequest getMappingsRequest, Req */ public void getMappingAsync(GetMappingsRequest getMappingsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, RequestConverters::getMappings, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, IndicesRequestConverters::getMappings, options, GetMappingsResponse::fromXContent, listener, emptySet()); } @@ -200,7 +200,7 @@ public void getMappingAsync(GetMappingsRequest getMappingsRequest, RequestOption */ public GetFieldMappingsResponse getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getFieldMappingsRequest, RequestConverters::getFieldMapping, options, + return restHighLevelClient.performRequestAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, GetFieldMappingsResponse::fromXContent, emptySet()); } @@ -214,7 +214,7 @@ public GetFieldMappingsResponse getFieldMapping(GetFieldMappingsRequest getField */ public void getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, RequestConverters::getFieldMapping, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, GetFieldMappingsResponse::fromXContent, listener, emptySet()); } @@ -228,7 +228,7 @@ public void getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options) throws IOException { - return 
restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, options, + return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, IndicesRequestConverters::updateAliases, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -242,7 +242,7 @@ public AcknowledgedResponse updateAliases(IndicesAliasesRequest indicesAliasesRe */ public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, options, + restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, IndicesRequestConverters::updateAliases, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -256,7 +256,7 @@ public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, Requ * @throws IOException in case there is a problem sending the request or parsing back the response */ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex, options, + return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, IndicesRequestConverters::openIndex, options, OpenIndexResponse::fromXContent, emptySet()); } @@ -269,7 +269,7 @@ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, RequestOptions * @param listener the listener to be notified upon request completion */ public void openAsync(OpenIndexRequest openIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex, options, + restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, IndicesRequestConverters::openIndex, options, OpenIndexResponse::fromXContent, listener, emptySet()); } @@ -283,7 +283,7 @@ public void openAsync(OpenIndexRequest openIndexRequest, RequestOptions options, * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse close(CloseIndexRequest closeIndexRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, options, + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, IndicesRequestConverters::closeIndex, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -296,7 +296,7 @@ public AcknowledgedResponse close(CloseIndexRequest closeIndexRequest, RequestOp * @param listener the listener to be notified upon request completion */ public void closeAsync(CloseIndexRequest closeIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, options, + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, IndicesRequestConverters::closeIndex, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -311,7 +311,7 @@ public void closeAsync(CloseIndexRequest closeIndexRequest, RequestOptions optio * @throws IOException in case there is a problem sending the request */ public boolean existsAlias(GetAliasesRequest getAliasesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequest(getAliasesRequest, 
RequestConverters::existsAlias, options, + return restHighLevelClient.performRequest(getAliasesRequest, IndicesRequestConverters::existsAlias, options, RestHighLevelClient::convertExistsResponse, emptySet()); } @@ -324,7 +324,7 @@ public boolean existsAlias(GetAliasesRequest getAliasesRequest, RequestOptions o * @param listener the listener to be notified upon request completion */ public void existsAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias, options, + restHighLevelClient.performRequestAsync(getAliasesRequest, IndicesRequestConverters::existsAlias, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -337,7 +337,7 @@ public void existsAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions * @throws IOException in case there is a problem sending the request or parsing back the response */ public RefreshResponse refresh(RefreshRequest refreshRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, options, + return restHighLevelClient.performRequestAndParseEntity(refreshRequest, IndicesRequestConverters::refresh, options, RefreshResponse::fromXContent, emptySet()); } @@ -349,7 +349,7 @@ public RefreshResponse refresh(RefreshRequest refreshRequest, RequestOptions opt * @param listener the listener to be notified upon request completion */ public void refreshAsync(RefreshRequest refreshRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, options, + restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, IndicesRequestConverters::refresh, options, RefreshResponse::fromXContent, listener, emptySet()); } @@ -362,7 +362,7 @@ public void refreshAsync(RefreshRequest refreshRequest, RequestOptions options, * @throws IOException in case there is a problem sending the request or parsing back the response */ public FlushResponse flush(FlushRequest flushRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, options, + return restHighLevelClient.performRequestAndParseEntity(flushRequest, IndicesRequestConverters::flush, options, FlushResponse::fromXContent, emptySet()); } @@ -374,7 +374,7 @@ public FlushResponse flush(FlushRequest flushRequest, RequestOptions options) th * @param listener the listener to be notified upon request completion */ public void flushAsync(FlushRequest flushRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, options, + restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, IndicesRequestConverters::flush, options, FlushResponse::fromXContent, listener, emptySet()); } @@ -388,7 +388,7 @@ public void flushAsync(FlushRequest flushRequest, RequestOptions options, Action * @throws IOException in case there is a problem sending the request or parsing back the response */ public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, options, + return 
restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, SyncedFlushResponse::fromXContent, emptySet()); } @@ -402,7 +402,7 @@ public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Re */ public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, options, + restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, SyncedFlushResponse::fromXContent, listener, emptySet()); } @@ -416,7 +416,7 @@ public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptio * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetSettingsResponse getSettings(GetSettingsRequest getSettingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getSettingsRequest, RequestConverters::getSettings, options, + return restHighLevelClient.performRequestAndParseEntity(getSettingsRequest, IndicesRequestConverters::getSettings, options, GetSettingsResponse::fromXContent, emptySet()); } @@ -430,7 +430,7 @@ public GetSettingsResponse getSettings(GetSettingsRequest getSettingsRequest, Re */ public void getSettingsAsync(GetSettingsRequest getSettingsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, RequestConverters::getSettings, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, IndicesRequestConverters::getSettings, options, GetSettingsResponse::fromXContent, listener, emptySet()); } @@ -444,7 +444,7 @@ public void getSettingsAsync(GetSettingsRequest getSettingsRequest, RequestOptio * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetIndexResponse get(GetIndexRequest getIndexRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getIndexRequest, RequestConverters::getIndex, options, + return restHighLevelClient.performRequestAndParseEntity(getIndexRequest, IndicesRequestConverters::getIndex, options, GetIndexResponse::fromXContent, emptySet()); } @@ -458,7 +458,7 @@ public GetIndexResponse get(GetIndexRequest getIndexRequest, RequestOptions opti */ public void getAsync(GetIndexRequest getIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexRequest, RequestConverters::getIndex, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getIndexRequest, IndicesRequestConverters::getIndex, options, GetIndexResponse::fromXContent, listener, emptySet()); } @@ -487,7 +487,7 @@ public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Reques * @throws IOException in case there is a problem sending the request or parsing back the response */ public ForceMergeResponse forcemerge(ForceMergeRequest forceMergeRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, options, + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, IndicesRequestConverters::forceMerge, options, ForceMergeResponse::fromXContent, emptySet()); } @@ -514,7 +514,7 @@ 
public void forceMergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions * @param listener the listener to be notified upon request completion */ public void forcemergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, options, + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, IndicesRequestConverters::forceMerge, options, ForceMergeResponse::fromXContent, listener, emptySet()); } @@ -529,7 +529,7 @@ public void forcemergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions */ public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, options, + return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, IndicesRequestConverters::clearCache, options, ClearIndicesCacheResponse::fromXContent, emptySet()); } @@ -543,7 +543,7 @@ public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndice */ public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, options, + restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, IndicesRequestConverters::clearCache, options, ClearIndicesCacheResponse::fromXContent, listener, emptySet()); } @@ -559,7 +559,7 @@ public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, R public boolean exists(GetIndexRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequest( request, - RequestConverters::indicesExist, + IndicesRequestConverters::indicesExist, options, RestHighLevelClient::convertExistsResponse, Collections.emptySet() @@ -577,7 +577,7 @@ public boolean exists(GetIndexRequest request, RequestOptions options) throws IO public void existsAsync(GetIndexRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsync( request, - RequestConverters::indicesExist, + IndicesRequestConverters::indicesExist, options, RestHighLevelClient::convertExistsResponse, listener, @@ -595,7 +595,7 @@ public void existsAsync(GetIndexRequest request, RequestOptions options, ActionL * @throws IOException in case there is a problem sending the request or parsing back the response */ public ResizeResponse shrink(ResizeRequest resizeRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, options, + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, IndicesRequestConverters::shrink, options, ResizeResponse::fromXContent, emptySet()); } @@ -608,7 +608,7 @@ public ResizeResponse shrink(ResizeRequest resizeRequest, RequestOptions options * @param listener the listener to be notified upon request completion */ public void shrinkAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, options, + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::shrink, options, 
ResizeResponse::fromXContent, listener, emptySet()); } @@ -622,7 +622,7 @@ public void shrinkAsync(ResizeRequest resizeRequest, RequestOptions options, Act * @throws IOException in case there is a problem sending the request or parsing back the response */ public ResizeResponse split(ResizeRequest resizeRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, options, + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, IndicesRequestConverters::split, options, ResizeResponse::fromXContent, emptySet()); } @@ -635,7 +635,7 @@ public ResizeResponse split(ResizeRequest resizeRequest, RequestOptions options) * @param listener the listener to be notified upon request completion */ public void splitAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, options, + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::split, options, ResizeResponse::fromXContent, listener, emptySet()); } @@ -649,7 +649,7 @@ public void splitAsync(ResizeRequest resizeRequest, RequestOptions options, Acti * @throws IOException in case there is a problem sending the request or parsing back the response */ public RolloverResponse rollover(RolloverRequest rolloverRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover, options, + return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, IndicesRequestConverters::rollover, options, RolloverResponse::fromXContent, emptySet()); } @@ -662,7 +662,7 @@ public RolloverResponse rollover(RolloverRequest rolloverRequest, RequestOptions * @param listener the listener to be notified upon request completion */ public void rolloverAsync(RolloverRequest rolloverRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, options, + restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, IndicesRequestConverters::rollover, options, RolloverResponse::fromXContent, listener, emptySet()); } @@ -676,7 +676,7 @@ public void rolloverAsync(RolloverRequest rolloverRequest, RequestOptions option * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetAliasesResponse getAlias(GetAliasesRequest getAliasesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getAliasesRequest, RequestConverters::getAlias, options, + return restHighLevelClient.performRequestAndParseEntity(getAliasesRequest, IndicesRequestConverters::getAlias, options, GetAliasesResponse::fromXContent, singleton(RestStatus.NOT_FOUND.getStatus())); } @@ -689,7 +689,7 @@ public GetAliasesResponse getAlias(GetAliasesRequest getAliasesRequest, RequestO * @param listener the listener to be notified upon request completion */ public void getAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getAliasesRequest, RequestConverters::getAlias, options, + restHighLevelClient.performRequestAsyncAndParseEntity(getAliasesRequest, IndicesRequestConverters::getAlias, options, GetAliasesResponse::fromXContent, listener, 
singleton(RestStatus.NOT_FOUND.getStatus())); } @@ -703,7 +703,7 @@ public void getAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions op * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse putSettings(UpdateSettingsRequest updateSettingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, options, + return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, IndicesRequestConverters::indexPutSettings, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -717,7 +717,7 @@ public AcknowledgedResponse putSettings(UpdateSettingsRequest updateSettingsRequ */ public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, options, + restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, IndicesRequestConverters::indexPutSettings, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -732,7 +732,7 @@ public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, Reques */ public AcknowledgedResponse putTemplate(PutIndexTemplateRequest putIndexTemplateRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options, + return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -746,7 +746,7 @@ public AcknowledgedResponse putTemplate(PutIndexTemplateRequest putIndexTemplate */ public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options, + restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -761,7 +761,7 @@ public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, Re * @throws IOException in case there is a problem sending the request or parsing back the response */ public ValidateQueryResponse validateQuery(ValidateQueryRequest validateQueryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(validateQueryRequest, RequestConverters::validateQuery, options, + return restHighLevelClient.performRequestAndParseEntity(validateQueryRequest, IndicesRequestConverters::validateQuery, options, ValidateQueryResponse::fromXContent, emptySet()); } @@ -776,7 +776,7 @@ public ValidateQueryResponse validateQuery(ValidateQueryRequest validateQueryReq */ public void validateQueryAsync(ValidateQueryRequest validateQueryRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(validateQueryRequest, RequestConverters::validateQuery, options, + restHighLevelClient.performRequestAsyncAndParseEntity(validateQueryRequest, IndicesRequestConverters::validateQuery, options, ValidateQueryResponse::fromXContent, listener, emptySet()); } @@ -791,7 +791,7 @@ public void 
validateQueryAsync(ValidateQueryRequest validateQueryRequest, Reques */ public GetIndexTemplatesResponse getTemplate(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, RequestConverters::getTemplates, + return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplates, options, GetIndexTemplatesResponse::fromXContent, emptySet()); } @@ -805,7 +805,7 @@ public GetIndexTemplatesResponse getTemplate(GetIndexTemplatesRequest getIndexTe */ public void getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, RequestConverters::getTemplates, + restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplates, options, GetIndexTemplatesResponse::fromXContent, listener, emptySet()); } @@ -818,7 +818,7 @@ public void getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized */ public AnalyzeResponse analyze(AnalyzeRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::analyze, options, + return restHighLevelClient.performRequestAndParseEntity(request, IndicesRequestConverters::analyze, options, AnalyzeResponse::fromXContent, emptySet()); } @@ -833,7 +833,7 @@ public AnalyzeResponse analyze(AnalyzeRequest request, RequestOptions options) t */ public void analyzeAsync(AnalyzeRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::analyze, options, + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::analyze, options, AnalyzeResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java new file mode 100644 index 0000000000000..740b87107c150 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -0,0 +1,403 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.common.Strings; + +import java.io.IOException; +import java.util.Locale; + +public class IndicesRequestConverters { + static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { + String endpoint = RequestConverters.endpoint(deleteIndexRequest.indices()); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(deleteIndexRequest.timeout()); + parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout()); + parameters.withIndicesOptions(deleteIndexRequest.indicesOptions()); + return request; + } + + static Request openIndex(OpenIndexRequest openIndexRequest) { + String endpoint = RequestConverters.endpoint(openIndexRequest.indices(), "_open"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(openIndexRequest.timeout()); + parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout()); + parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards()); + parameters.withIndicesOptions(openIndexRequest.indicesOptions()); + return request; + } + + static Request closeIndex(CloseIndexRequest closeIndexRequest) { + String endpoint = RequestConverters.endpoint(closeIndexRequest.indices(), "_close"); + Request request = new 
Request(HttpPost.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(closeIndexRequest.timeout()); + parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); + parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); + return request; + } + + static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException { + String endpoint = RequestConverters.endpoint(createIndexRequest.indices()); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(createIndexRequest.timeout()); + parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); + parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); + + request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException { + Request request = new Request(HttpPost.METHOD_NAME, "/_aliases"); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(indicesAliasesRequest.timeout()); + parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout()); + + request.setEntity(RequestConverters.createEntity(indicesAliasesRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request putMapping(PutMappingRequest putMappingRequest) throws IOException { + // The concreteIndex is an internal concept, not applicable to requests made over the REST API. + if (putMappingRequest.getConcreteIndex() != null) { + throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API"); + } + + Request request = new Request(HttpPut.METHOD_NAME, RequestConverters.endpoint(putMappingRequest.indices(), "_mapping", + putMappingRequest.type())); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(putMappingRequest.timeout()); + parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); + + request.setEntity(RequestConverters.createEntity(putMappingRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getMappings(GetMappingsRequest getMappingsRequest) throws IOException { + String[] indices = getMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getMappingsRequest.indices(); + String[] types = getMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getMappingsRequest.types(); + + Request request = new Request(HttpGet.METHOD_NAME, RequestConverters.endpoint(indices, "_mapping", types)); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout()); + parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); + parameters.withLocal(getMappingsRequest.local()); + return request; + } + + static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) throws IOException { + String[] indices = getFieldMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.indices(); + String[] types = getFieldMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.types(); + String[] fields = getFieldMappingsRequest.fields() == null ? 
Strings.EMPTY_ARRAY : getFieldMappingsRequest.fields(); + + String endpoint = new RequestConverters.EndpointBuilder().addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_mapping").addCommaSeparatedPathParts(types) + .addPathPartAsIs("field").addCommaSeparatedPathParts(fields) + .build(); + + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions()); + parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults()); + parameters.withLocal(getFieldMappingsRequest.local()); + return request; + } + + static Request refresh(RefreshRequest refreshRequest) { + String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices(); + Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_refresh")); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(refreshRequest.indicesOptions()); + return request; + } + + static Request flush(FlushRequest flushRequest) { + String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices(); + Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush")); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(flushRequest.indicesOptions()); + parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing())); + parameters.putParam("force", Boolean.toString(flushRequest.force())); + return request; + } + + static Request flushSynced(SyncedFlushRequest syncedFlushRequest) { + String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices(); + Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced")); + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(syncedFlushRequest.indicesOptions()); + return request; + } + + static Request forceMerge(ForceMergeRequest forceMergeRequest) { + String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); + Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge")); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); + parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); + return request; + } + + static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { + String[] indices = clearIndicesCacheRequest.indices() == null ? 
Strings.EMPTY_ARRAY : clearIndicesCacheRequest.indices();
+        Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_cache/clear"));
+
+        RequestConverters.Params parameters = new RequestConverters.Params(request);
+        parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions());
+        parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache()));
+        parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache()));
+        parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache()));
+        parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields()));
+        return request;
+    }
+
+    static Request existsAlias(GetAliasesRequest getAliasesRequest) {
+        if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) &&
+            (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) {
+            throw new IllegalArgumentException("existsAlias requires at least an alias or an index");
+        }
+        String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
+        String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
+
+        Request request = new Request(HttpHead.METHOD_NAME, RequestConverters.endpoint(indices, "_alias", aliases));
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withIndicesOptions(getAliasesRequest.indicesOptions());
+        params.withLocal(getAliasesRequest.local());
+        return request;
+    }
+
+    static Request split(ResizeRequest resizeRequest) throws IOException {
+        if (resizeRequest.getResizeType() != ResizeType.SPLIT) {
+            throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices split request");
+        }
+        return resize(resizeRequest);
+    }
+
+    static Request shrink(ResizeRequest resizeRequest) throws IOException {
+        if (resizeRequest.getResizeType() != ResizeType.SHRINK) {
+            throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices shrink request");
+        }
+        return resize(resizeRequest);
+    }
+
+    private static Request resize(ResizeRequest resizeRequest) throws IOException {
+        String endpoint = new RequestConverters.EndpointBuilder().addPathPart(resizeRequest.getSourceIndex())
+            .addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT))
+            .addPathPart(resizeRequest.getTargetIndexRequest().index()).build();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withTimeout(resizeRequest.timeout());
+        params.withMasterTimeout(resizeRequest.masterNodeTimeout());
+        params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
+
+        request.setEntity(RequestConverters.createEntity(resizeRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
+    static Request rollover(RolloverRequest rolloverRequest) throws IOException {
+        String endpoint = new RequestConverters.EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
+            .addPathPart(rolloverRequest.getNewIndexName()).build();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withTimeout(rolloverRequest.timeout());
+        params.withMasterTimeout(rolloverRequest.masterNodeTimeout());
+        params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards());
+        if (rolloverRequest.isDryRun()) {
+            params.putParam("dry_run", Boolean.TRUE.toString());
+        }
+
+        request.setEntity(RequestConverters.createEntity(rolloverRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
+    static Request getSettings(GetSettingsRequest getSettingsRequest) {
+        String[] indices = getSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.indices();
+        String[] names = getSettingsRequest.names() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.names();
+
+        String endpoint = RequestConverters.endpoint(indices, "_settings", names);
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withIndicesOptions(getSettingsRequest.indicesOptions());
+        params.withLocal(getSettingsRequest.local());
+        params.withIncludeDefaults(getSettingsRequest.includeDefaults());
+        params.withMasterTimeout(getSettingsRequest.masterNodeTimeout());
+
+        return request;
+    }
+
+    static Request getIndex(GetIndexRequest getIndexRequest) {
+        String[] indices = getIndexRequest.indices() == null ? Strings.EMPTY_ARRAY : getIndexRequest.indices();
+
+        String endpoint = RequestConverters.endpoint(indices);
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withIndicesOptions(getIndexRequest.indicesOptions());
+        params.withLocal(getIndexRequest.local());
+        params.withIncludeDefaults(getIndexRequest.includeDefaults());
+        params.withHuman(getIndexRequest.humanReadable());
+        params.withMasterTimeout(getIndexRequest.masterNodeTimeout());
+
+        return request;
+    }
+
+    static Request indicesExist(GetIndexRequest getIndexRequest) {
+        // this can be called with no indices as argument by transport client, not via REST though
+        if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) {
+            throw new IllegalArgumentException("indices are mandatory");
+        }
+        String endpoint = RequestConverters.endpoint(getIndexRequest.indices(), "");
+        Request request = new Request(HttpHead.METHOD_NAME, endpoint);
+
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withLocal(getIndexRequest.local());
+        params.withHuman(getIndexRequest.humanReadable());
+        params.withIndicesOptions(getIndexRequest.indicesOptions());
+        params.withIncludeDefaults(getIndexRequest.includeDefaults());
+        return request;
+    }
+
+    static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException {
+        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
+        Request request = new Request(HttpPut.METHOD_NAME, RequestConverters.endpoint(indices, "_settings"));
+
+        RequestConverters.Params parameters = new RequestConverters.Params(request);
+        parameters.withTimeout(updateSettingsRequest.timeout());
+        parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
+        parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
+        parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());
+
+        request.setEntity(RequestConverters.createEntity(updateSettingsRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
+    static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
+        String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template")
+            .addPathPart(putIndexTemplateRequest.name()).build();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout());
+        if (putIndexTemplateRequest.create()) {
+            params.putParam("create", Boolean.TRUE.toString());
+        }
+        if (Strings.hasText(putIndexTemplateRequest.cause())) {
+            params.putParam("cause", putIndexTemplateRequest.cause());
+        }
+        request.setEntity(RequestConverters.createEntity(putIndexTemplateRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
+    static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws IOException {
+        String[] indices = validateQueryRequest.indices() == null ? Strings.EMPTY_ARRAY : validateQueryRequest.indices();
+        String[] types = validateQueryRequest.types() == null || indices.length <= 0 ? Strings.EMPTY_ARRAY : validateQueryRequest.types();
+        String endpoint = RequestConverters.endpoint(indices, types, "_validate/query");
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withIndicesOptions(validateQueryRequest.indicesOptions());
+        params.putParam("explain", Boolean.toString(validateQueryRequest.explain()));
+        params.putParam("all_shards", Boolean.toString(validateQueryRequest.allShards()));
+        params.putParam("rewrite", Boolean.toString(validateQueryRequest.rewrite()));
+        request.setEntity(RequestConverters.createEntity(validateQueryRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
+    static Request getAlias(GetAliasesRequest getAliasesRequest) {
+        String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
+        String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
+        String endpoint = RequestConverters.endpoint(indices, "_alias", aliases);
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withIndicesOptions(getAliasesRequest.indicesOptions());
+        params.withLocal(getAliasesRequest.local());
+        return request;
+    }
+
+    static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) throws IOException {
+        String[] names = getIndexTemplatesRequest.names();
+        String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template").addCommaSeparatedPathParts(names).build();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        params.withLocal(getIndexTemplatesRequest.local());
+        params.withMasterTimeout(getIndexTemplatesRequest.masterNodeTimeout());
+        return request;
+    }
+
+    static Request analyze(AnalyzeRequest request) throws IOException {
+        RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder();
+        String index = request.index();
+        if (index != null) {
+            builder.addPathPart(index);
+        }
+        builder.addPathPartAsIs("_analyze");
+        Request req = new Request(HttpGet.METHOD_NAME, builder.build());
+        req.setEntity(RequestConverters.createEntity(request, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
+        return req;
+    }
+}
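Every converter in the new class follows the same shape: resolve the target indices (falling back to Strings.EMPTY_ARRAY), build the endpoint, attach optional URL parameters through RequestConverters.Params, and serialize a body only when the request carries one. A minimal sketch of that shape, for illustration only (it mirrors the refresh-style converters above; the method name here is hypothetical):

```
// Illustrative sketch, not part of this patch.
static Request exampleRefresh(RefreshRequest refreshRequest) {
    // null indices fall back to the empty array, which yields the bare "/_refresh" endpoint
    String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices();
    Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_refresh"));

    // Params writes query-string parameters straight onto the request
    RequestConverters.Params parameters = new RequestConverters.Params(request);
    parameters.withIndicesOptions(refreshRequest.indicesOptions());
    return request;
}
```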
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
index 840bc4f0c4d9f..8372f4b0fecd0 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
@@ -33,30 +33,7 @@ import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
 import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
-import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
 import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
-import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
-import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
-import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
-import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
-import org.elasticsearch.action.admin.indices.flush.FlushRequest;
-import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
-import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
-import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
-import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
-import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
-import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
-import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
-import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
-import org.elasticsearch.action.admin.indices.shrink.ResizeType;
-import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
-import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
-import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.explain.ExplainRequest;
@@ -130,165 +107,6 @@ static Request delete(DeleteRequest deleteRequest) {
         return request;
     }
 
-    static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) {
-        String endpoint = endpoint(deleteIndexRequest.indices());
-        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(deleteIndexRequest.timeout());
-        parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout());
-        parameters.withIndicesOptions(deleteIndexRequest.indicesOptions());
-        return request;
-    }
-
-    static Request openIndex(OpenIndexRequest openIndexRequest) {
-        String endpoint = endpoint(openIndexRequest.indices(), "_open");
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(openIndexRequest.timeout());
-        parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout());
-        parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards());
-        parameters.withIndicesOptions(openIndexRequest.indicesOptions());
-        return request;
-    }
-
-    static Request closeIndex(CloseIndexRequest closeIndexRequest) {
-        String endpoint = endpoint(closeIndexRequest.indices(), "_close");
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(closeIndexRequest.timeout());
-        parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout());
-        parameters.withIndicesOptions(closeIndexRequest.indicesOptions());
-        return request;
-    }
-
-    static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
-        String endpoint = endpoint(createIndexRequest.indices());
-        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(createIndexRequest.timeout());
-        parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
-        parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
-
-        request.setEntity(createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException {
-        Request request = new Request(HttpPost.METHOD_NAME, "/_aliases");
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(indicesAliasesRequest.timeout());
-        parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout());
-
-        request.setEntity(createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request putMapping(PutMappingRequest putMappingRequest) throws IOException {
-        // The concreteIndex is an internal concept, not applicable to requests made over the REST API.
-        if (putMappingRequest.getConcreteIndex() != null) {
-            throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API");
-        }
-
-        Request request = new Request(HttpPut.METHOD_NAME, endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()));
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(putMappingRequest.timeout());
-        parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout());
-
-        request.setEntity(createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request getMappings(GetMappingsRequest getMappingsRequest) throws IOException {
-        String[] indices = getMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getMappingsRequest.indices();
-        String[] types = getMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getMappingsRequest.types();
-
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint(indices, "_mapping", types));
-
-        Params parameters = new Params(request);
-        parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout());
-        parameters.withIndicesOptions(getMappingsRequest.indicesOptions());
-        parameters.withLocal(getMappingsRequest.local());
-        return request;
-    }
-
-    static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) throws IOException {
-        String[] indices = getFieldMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.indices();
-        String[] types = getFieldMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.types();
-        String[] fields = getFieldMappingsRequest.fields() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.fields();
-
-        String endpoint = new EndpointBuilder().addCommaSeparatedPathParts(indices)
-            .addPathPartAsIs("_mapping").addCommaSeparatedPathParts(types)
-            .addPathPartAsIs("field").addCommaSeparatedPathParts(fields)
-            .build();
-
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
-
-        Params parameters = new Params(request);
-        parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions());
-        parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults());
-        parameters.withLocal(getFieldMappingsRequest.local());
-        return request;
-    }
-
-    static Request refresh(RefreshRequest refreshRequest) {
-        String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices();
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_refresh"));
-
-        Params parameters = new Params(request);
-        parameters.withIndicesOptions(refreshRequest.indicesOptions());
-        return request;
-    }
-
-    static Request flush(FlushRequest flushRequest) {
-        String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices();
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush"));
-
-        Params parameters = new Params(request);
-        parameters.withIndicesOptions(flushRequest.indicesOptions());
-        parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
-        parameters.putParam("force", Boolean.toString(flushRequest.force()));
-        return request;
-    }
-
-    static Request flushSynced(SyncedFlushRequest syncedFlushRequest) {
-        String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices();
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush/synced"));
-        Params parameters = new Params(request);
-        parameters.withIndicesOptions(syncedFlushRequest.indicesOptions());
-        return request;
-    }
-
-    static Request forceMerge(ForceMergeRequest forceMergeRequest) {
-        String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge"));
-
-        Params parameters = new Params(request);
-        parameters.withIndicesOptions(forceMergeRequest.indicesOptions());
-        parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments()));
-        parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes()));
-        parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush()));
-        return request;
-    }
-
-    static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) {
-        String[] indices = clearIndicesCacheRequest.indices() == null ? Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices();
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_cache/clear"));
-
-        Params parameters = new Params(request);
-        parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions());
-        parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache()));
-        parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache()));
-        parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache()));
-        parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields()));
-        return request;
-    }
-
     static Request info() {
         return new Request(HttpGet.METHOD_NAME, "/");
     }
@@ -609,22 +427,6 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat
         return request;
     }
 
-    static Request existsAlias(GetAliasesRequest getAliasesRequest) {
-        if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) &&
-            (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) {
-            throw new IllegalArgumentException("existsAlias requires at least an alias or an index");
-        }
-        String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
-        String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
-
-        Request request = new Request(HttpHead.METHOD_NAME, endpoint(indices, "_alias", aliases));
-
-        Params params = new Params(request);
-        params.withIndicesOptions(getAliasesRequest.indicesOptions());
-        params.withLocal(getAliasesRequest.local());
-        return request;
-    }
-
     static Request explain(ExplainRequest explainRequest) throws IOException {
         Request request = new Request(HttpGet.METHOD_NAME,
                 endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain"));
@@ -657,35 +459,6 @@ static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
         return request;
     }
 
-    static Request split(ResizeRequest resizeRequest) throws IOException {
-        if (resizeRequest.getResizeType() != ResizeType.SPLIT) {
-            throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices split request");
-        }
-        return resize(resizeRequest);
-    }
-
-    static Request shrink(ResizeRequest resizeRequest) throws IOException {
-        if (resizeRequest.getResizeType() != ResizeType.SHRINK) {
-            throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices shrink request");
-        }
-        return resize(resizeRequest);
-    }
-
-    private static Request resize(ResizeRequest resizeRequest) throws IOException {
-        String endpoint = new EndpointBuilder().addPathPart(resizeRequest.getSourceIndex())
-            .addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT))
-            .addPathPart(resizeRequest.getTargetIndexRequest().index()).build();
-        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
-
-        Params params = new Params(request);
-        params.withTimeout(resizeRequest.timeout());
-        params.withMasterTimeout(resizeRequest.masterNodeTimeout());
-        params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
-
-        request.setEntity(createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
     static Request reindex(ReindexRequest reindexRequest) throws IOException {
         String endpoint = new EndpointBuilder().addPathPart("_reindex").build();
         Request request = new Request(HttpPost.METHOD_NAME, endpoint);
@@ -754,135 +527,6 @@ static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws I
         return request;
     }
 
-    static Request rollover(RolloverRequest rolloverRequest) throws IOException {
-        String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
-            .addPathPart(rolloverRequest.getNewIndexName()).build();
-        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
-
-        Params params = new Params(request);
-        params.withTimeout(rolloverRequest.timeout());
-        params.withMasterTimeout(rolloverRequest.masterNodeTimeout());
-        params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards());
-        if (rolloverRequest.isDryRun()) {
-            params.putParam("dry_run", Boolean.TRUE.toString());
-        }
-
-        request.setEntity(createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request getSettings(GetSettingsRequest getSettingsRequest) {
-        String[] indices = getSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.indices();
-        String[] names = getSettingsRequest.names() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.names();
-
-        String endpoint = endpoint(indices, "_settings", names);
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
-
-        Params params = new Params(request);
-        params.withIndicesOptions(getSettingsRequest.indicesOptions());
-        params.withLocal(getSettingsRequest.local());
-        params.withIncludeDefaults(getSettingsRequest.includeDefaults());
-        params.withMasterTimeout(getSettingsRequest.masterNodeTimeout());
-
-        return request;
-    }
-
-    static Request getIndex(GetIndexRequest getIndexRequest) {
-        String[] indices = getIndexRequest.indices() == null ? Strings.EMPTY_ARRAY : getIndexRequest.indices();
-
-        String endpoint = endpoint(indices);
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
-
-        Params params = new Params(request);
-        params.withIndicesOptions(getIndexRequest.indicesOptions());
-        params.withLocal(getIndexRequest.local());
-        params.withIncludeDefaults(getIndexRequest.includeDefaults());
-        params.withHuman(getIndexRequest.humanReadable());
-        params.withMasterTimeout(getIndexRequest.masterNodeTimeout());
-
-        return request;
-    }
-
-    static Request indicesExist(GetIndexRequest getIndexRequest) {
-        // this can be called with no indices as argument by transport client, not via REST though
-        if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) {
-            throw new IllegalArgumentException("indices are mandatory");
-        }
-        String endpoint = endpoint(getIndexRequest.indices(), "");
-        Request request = new Request(HttpHead.METHOD_NAME, endpoint);
-
-        Params params = new Params(request);
-        params.withLocal(getIndexRequest.local());
-        params.withHuman(getIndexRequest.humanReadable());
-        params.withIndicesOptions(getIndexRequest.indicesOptions());
-        params.withIncludeDefaults(getIndexRequest.includeDefaults());
-        return request;
-    }
-
-    static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException {
-        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
-        Request request = new Request(HttpPut.METHOD_NAME, endpoint(indices, "_settings"));
-
-        Params parameters = new Params(request);
-        parameters.withTimeout(updateSettingsRequest.timeout());
-        parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
-        parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
-        parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());
-
-        request.setEntity(createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
-        String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
-        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
-        Params params = new Params(request);
-        params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout());
-        if (putIndexTemplateRequest.create()) {
-            params.putParam("create", Boolean.TRUE.toString());
-        }
-        if (Strings.hasText(putIndexTemplateRequest.cause())) {
-            params.putParam("cause", putIndexTemplateRequest.cause());
-        }
-        request.setEntity(createEntity(putIndexTemplateRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws IOException {
-        String[] indices = validateQueryRequest.indices() == null ? Strings.EMPTY_ARRAY : validateQueryRequest.indices();
-        String[] types = validateQueryRequest.types() == null || indices.length <= 0 ? Strings.EMPTY_ARRAY : validateQueryRequest.types();
-        String endpoint = endpoint(indices, types, "_validate/query");
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
-        Params params = new Params(request);
-        params.withIndicesOptions(validateQueryRequest.indicesOptions());
-        params.putParam("explain", Boolean.toString(validateQueryRequest.explain()));
-        params.putParam("all_shards", Boolean.toString(validateQueryRequest.allShards()));
-        params.putParam("rewrite", Boolean.toString(validateQueryRequest.rewrite()));
-        request.setEntity(createEntity(validateQueryRequest, REQUEST_BODY_CONTENT_TYPE));
-        return request;
-    }
-
-    static Request getAlias(GetAliasesRequest getAliasesRequest) {
-        String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
-        String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
-        String endpoint = endpoint(indices, "_alias", aliases);
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
-        Params params = new Params(request);
-        params.withIndicesOptions(getAliasesRequest.indicesOptions());
-        params.withLocal(getAliasesRequest.local());
-        return request;
-    }
-
-    static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) throws IOException {
-        String[] names = getIndexTemplatesRequest.names();
-        String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addCommaSeparatedPathParts(names).build();
-        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
-        Params params = new Params(request);
-        params.withLocal(getIndexTemplatesRequest.local());
-        params.withMasterTimeout(getIndexTemplatesRequest.masterNodeTimeout());
-        return request;
-    }
-
     static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws IOException {
         String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(putStoredScriptRequest.id()).build();
         Request request = new Request(HttpPost.METHOD_NAME, endpoint);
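The removals above are pure moves; the bodies are unchanged apart from qualifying the shared helpers with the RequestConverters. prefix in their new home. The new test class below exercises each moved converter in isolation, always following the same pattern: build a request with randomized optional parameters, record the expected query string in a map, convert, and compare. A condensed sketch of that recurring idiom, with hypothetical values (the real tests randomize far more):

```
// Illustrative sketch of the test pattern, not part of this patch.
GetIndexRequest exampleRequest = new GetIndexRequest().indices("index-1");
Map<String, String> expectedParams = new HashMap<>();
// the shared helpers randomize optional flags and record the expected query parameters
RequestConvertersTests.setRandomLocal(exampleRequest, expectedParams);

Request request = IndicesRequestConverters.getIndex(exampleRequest);
Assert.assertEquals("/index-1", request.getEndpoint());
Assert.assertEquals(expectedParams, request.getParameters());
```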
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java
new file mode 100644
index 0000000000000..e97041054995e
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java
@@ -0,0 +1,893 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
+import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
+import org.elasticsearch.action.admin.indices.shrink.ResizeType;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.index.RandomCreateIndexGenerator;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.StringJoiner;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases;
+import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest;
+import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings;
+import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class IndicesRequestConvertersTests extends ESTestCase {
+
+    public void testAnalyzeRequest() throws Exception {
+        AnalyzeRequest indexAnalyzeRequest = new AnalyzeRequest()
+            .text("Here is some text")
+            .index("test_index")
+            .analyzer("test_analyzer");
+
+        Request request = IndicesRequestConverters.analyze(indexAnalyzeRequest);
+        assertThat(request.getEndpoint(), equalTo("/test_index/_analyze"));
+        RequestConvertersTests.assertToXContentBody(indexAnalyzeRequest, request.getEntity());
+
+        AnalyzeRequest analyzeRequest = new AnalyzeRequest()
+            .text("more text")
+            .analyzer("test_analyzer");
+        assertThat(IndicesRequestConverters.analyze(analyzeRequest).getEndpoint(), equalTo("/_analyze"));
+    }
+
+    public void testIndicesExist() {
+        String[] indices = RequestConvertersTests.randomIndicesNames(1, 10);
+
+        GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indices);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams);
+        RequestConvertersTests.setRandomLocal(getIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomHumanReadable(getIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomIncludeDefaults(getIndexRequest, expectedParams);
+
+        final Request request = IndicesRequestConverters.indicesExist(getIndexRequest);
+
+        Assert.assertEquals(HttpHead.METHOD_NAME, request.getMethod());
+        Assert.assertEquals("/" + String.join(",", indices), request.getEndpoint());
+        Assert.assertThat(expectedParams, equalTo(request.getParameters()));
+        Assert.assertNull(request.getEntity());
+    }
+
+    public void testIndicesExistEmptyIndices() {
+        LuceneTestCase.expectThrows(IllegalArgumentException.class, ()
+            -> IndicesRequestConverters.indicesExist(new GetIndexRequest()));
+        LuceneTestCase.expectThrows(IllegalArgumentException.class, ()
+            -> IndicesRequestConverters.indicesExist(new GetIndexRequest().indices((String[]) null)));
+    }
+
+    public void testCreateIndex() throws IOException {
+        CreateIndexRequest createIndexRequest = randomCreateIndexRequest();
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(createIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
+
+        Request request = IndicesRequestConverters.createIndex(createIndexRequest);
+        Assert.assertEquals("/" + createIndexRequest.index(), request.getEndpoint());
+        Assert.assertEquals(expectedParams, request.getParameters());
+        Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod());
+        RequestConvertersTests.assertToXContentBody(createIndexRequest, request.getEntity());
+    }
+
+    public void testCreateIndexNullIndex() {
+        ActionRequestValidationException validationException = new CreateIndexRequest(null).validate();
+        Assert.assertNotNull(validationException);
+    }
+
+    public void testUpdateAliases() throws IOException {
+        IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
+        IndicesAliasesRequest.AliasActions aliasAction = randomAliasAction();
+        indicesAliasesRequest.addAliasAction(aliasAction);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(indicesAliasesRequest, expectedParams);
+
+        Request request = IndicesRequestConverters.updateAliases(indicesAliasesRequest);
+        Assert.assertEquals("/_aliases", request.getEndpoint());
+        Assert.assertEquals(expectedParams, request.getParameters());
+        RequestConvertersTests.assertToXContentBody(indicesAliasesRequest, request.getEntity());
+    }
+
+    public void testPutMapping() throws IOException {
+        PutMappingRequest putMappingRequest = new PutMappingRequest();
+
+        String[] indices = RequestConvertersTests.randomIndicesNames(0, 5);
+        putMappingRequest.indices(indices);
+
+        String type = ESTestCase.randomAlphaOfLengthBetween(3, 10);
+        putMappingRequest.type(type);
+
+        Map<String, String> expectedParams = new HashMap<>();
+
+        RequestConvertersTests.setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(putMappingRequest, expectedParams);
+
+        Request request = IndicesRequestConverters.putMapping(putMappingRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        String index = String.join(",", indices);
+        if (Strings.hasLength(index)) {
+            endpoint.add(index);
+        }
+        endpoint.add("_mapping");
+        endpoint.add(type);
+        Assert.assertEquals(endpoint.toString(), request.getEndpoint());
+
+        Assert.assertEquals(expectedParams, request.getParameters());
+        Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod());
+        RequestConvertersTests.assertToXContentBody(putMappingRequest, request.getEntity());
+    }
+
+    public void testGetMapping() throws IOException {
+        GetMappingsRequest getMappingRequest = new GetMappingsRequest();
+
+        String[] indices = Strings.EMPTY_ARRAY;
+        if (ESTestCase.randomBoolean()) {
+            indices = RequestConvertersTests.randomIndicesNames(0, 5);
+            getMappingRequest.indices(indices);
+        } else if (ESTestCase.randomBoolean()) {
+            getMappingRequest.indices((String[]) null);
+        }
+
+        String type = null;
+        if (ESTestCase.randomBoolean()) {
+            type = ESTestCase.randomAlphaOfLengthBetween(3, 10);
+            getMappingRequest.types(type);
+        } else if (ESTestCase.randomBoolean()) {
+            getMappingRequest.types((String[]) null);
+        }
+
+        Map<String, String> expectedParams = new HashMap<>();
+
+        RequestConvertersTests.setRandomIndicesOptions(getMappingRequest::indicesOptions,
+            getMappingRequest::indicesOptions, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(getMappingRequest, expectedParams);
+        RequestConvertersTests.setRandomLocal(getMappingRequest, expectedParams);
+
+        Request request = IndicesRequestConverters.getMappings(getMappingRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        String index = String.join(",", indices);
+        if (Strings.hasLength(index)) {
+            endpoint.add(index);
+        }
+        endpoint.add("_mapping");
+        if (type != null) {
+            endpoint.add(type);
+        }
+        Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+
+        Assert.assertThat(expectedParams, equalTo(request.getParameters()));
+        Assert.assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod()));
+    }
+
+    public void testGetFieldMapping() throws IOException {
+        GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest();
+
+        String[] indices = Strings.EMPTY_ARRAY;
+        if (ESTestCase.randomBoolean()) {
+            indices = RequestConvertersTests.randomIndicesNames(0, 5);
+            getFieldMappingsRequest.indices(indices);
+        } else if (ESTestCase.randomBoolean()) {
+            getFieldMappingsRequest.indices((String[]) null);
+        }
+
+        String type = null;
+        if (ESTestCase.randomBoolean()) {
+            type = ESTestCase.randomAlphaOfLengthBetween(3, 10);
+            getFieldMappingsRequest.types(type);
+        } else if (ESTestCase.randomBoolean()) {
+            getFieldMappingsRequest.types((String[]) null);
+        }
+
+        String[] fields = null;
+        if (ESTestCase.randomBoolean()) {
+            fields = new String[ESTestCase.randomIntBetween(1, 5)];
+            for (int i = 0; i < fields.length; i++) {
+                fields[i] = ESTestCase.randomAlphaOfLengthBetween(3, 10);
+            }
+            getFieldMappingsRequest.fields(fields);
+        } else if (ESTestCase.randomBoolean()) {
+            getFieldMappingsRequest.fields((String[]) null);
+        }
+
+        Map<String, String> expectedParams = new HashMap<>();
+
+        RequestConvertersTests.setRandomIndicesOptions(getFieldMappingsRequest::indicesOptions, getFieldMappingsRequest::indicesOptions,
+            expectedParams);
+        RequestConvertersTests.setRandomLocal(getFieldMappingsRequest::local, expectedParams);
+
+        Request request = IndicesRequestConverters.getFieldMapping(getFieldMappingsRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        String index = String.join(",", indices);
+        if (Strings.hasLength(index)) {
+            endpoint.add(index);
+        }
+        endpoint.add("_mapping");
+        if (type != null) {
+            endpoint.add(type);
+        }
+        endpoint.add("field");
+        if (fields != null) {
+            endpoint.add(String.join(",", fields));
+        }
+        Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+
+        Assert.assertThat(expectedParams, equalTo(request.getParameters()));
+        Assert.assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod()));
+    }
+
+    public void testDeleteIndex() {
+        String[] indices = RequestConvertersTests.randomIndicesNames(0, 5);
+        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(deleteIndexRequest, expectedParams);
+
+        RequestConvertersTests.setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions,
+            expectedParams);
+
+        Request request = IndicesRequestConverters.deleteIndex(deleteIndexRequest);
+        Assert.assertEquals("/" + String.join(",", indices), request.getEndpoint());
+        Assert.assertEquals(expectedParams, request.getParameters());
+        Assert.assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
+        Assert.assertNull(request.getEntity());
+    }
+
+    public void testGetSettings() throws IOException {
+        String[] indicesUnderTest = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+
+        GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(indicesUnderTest);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomMasterTimeout(getSettingsRequest, expectedParams);
+        RequestConvertersTests.setRandomIndicesOptions(getSettingsRequest::indicesOptions, getSettingsRequest::indicesOptions,
+            expectedParams);
+
+        RequestConvertersTests.setRandomLocal(getSettingsRequest, expectedParams);
+
+        if (ESTestCase.randomBoolean()) {
+            // the request object will not have include_defaults present unless it is set to
+            // true
+            getSettingsRequest.includeDefaults(ESTestCase.randomBoolean());
+            if (getSettingsRequest.includeDefaults()) {
+                expectedParams.put("include_defaults", Boolean.toString(true));
+            }
+        }
+
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indicesUnderTest != null && indicesUnderTest.length > 0) {
+            endpoint.add(String.join(",", indicesUnderTest));
+        }
+        endpoint.add("_settings");
+
+        if (ESTestCase.randomBoolean()) {
+            String[] names = ESTestCase.randomBoolean() ? null : new String[ESTestCase.randomIntBetween(0, 3)];
+            if (names != null) {
+                for (int x = 0; x < names.length; x++) {
+                    names[x] = ESTestCase.randomAlphaOfLengthBetween(3, 10);
+                }
+            }
+            getSettingsRequest.names(names);
+            if (names != null && names.length > 0) {
+                endpoint.add(String.join(",", names));
+            }
+        }
+
+        Request request = IndicesRequestConverters.getSettings(getSettingsRequest);
+
+        Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME));
+        Assert.assertThat(request.getEntity(), nullValue());
+    }
+
+    public void testGetIndex() throws IOException {
+        String[] indicesUnderTest = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+
+        GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indicesUnderTest);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomMasterTimeout(getIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams);
+        RequestConvertersTests.setRandomLocal(getIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomHumanReadable(getIndexRequest, expectedParams);
+
+        if (ESTestCase.randomBoolean()) {
+            // the request object will not have include_defaults present unless it is set to
+            // true
+            getIndexRequest.includeDefaults(ESTestCase.randomBoolean());
+            if (getIndexRequest.includeDefaults()) {
+                expectedParams.put("include_defaults", Boolean.toString(true));
+            }
+        }
+
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indicesUnderTest != null && indicesUnderTest.length > 0) {
+            endpoint.add(String.join(",", indicesUnderTest));
+        }
+
+        Request request = IndicesRequestConverters.getIndex(getIndexRequest);
+
+        Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME));
+        Assert.assertThat(request.getEntity(), nullValue());
+    }
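+
+    // Note: the "empty indices" validation tests below intentionally randomize between a null
+    // array and an empty array, since request validation must reject both forms the same way.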
+
+    public void testDeleteIndexEmptyIndices() {
+        String[] indices = ESTestCase.randomBoolean() ? null : Strings.EMPTY_ARRAY;
+        ActionRequestValidationException validationException = new DeleteIndexRequest(indices).validate();
+        Assert.assertNotNull(validationException);
+    }
+
+    public void testOpenIndex() {
+        String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices);
+        openIndexRequest.indices(indices);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomTimeout(openIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(openIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams);
+        RequestConvertersTests.setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams);
+
+        Request request = IndicesRequestConverters.openIndex(openIndexRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open");
+        Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        Assert.assertThat(expectedParams, equalTo(request.getParameters()));
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+        Assert.assertThat(request.getEntity(), nullValue());
+    }
+
+    public void testOpenIndexEmptyIndices() {
+        String[] indices = ESTestCase.randomBoolean() ? null : Strings.EMPTY_ARRAY;
+        ActionRequestValidationException validationException = new OpenIndexRequest(indices).validate();
+        Assert.assertNotNull(validationException);
+    }
+
+    public void testCloseIndex() {
+        String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(indices);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomTimeout(closeIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        RequestConvertersTests.setRandomMasterTimeout(closeIndexRequest, expectedParams);
+        RequestConvertersTests.setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions,
+            expectedParams);
+
+        Request request = IndicesRequestConverters.closeIndex(closeIndexRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close");
+        Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        Assert.assertThat(expectedParams, equalTo(request.getParameters()));
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+        Assert.assertThat(request.getEntity(), nullValue());
+    }
+
+    public void testCloseIndexEmptyIndices() {
+        String[] indices = ESTestCase.randomBoolean() ? null : Strings.EMPTY_ARRAY;
+        ActionRequestValidationException validationException = new CloseIndexRequest(indices).validate();
+        Assert.assertNotNull(validationException);
+    }
+
+    public void testRefresh() {
+        String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+        RefreshRequest refreshRequest;
+        if (ESTestCase.randomBoolean()) {
+            refreshRequest = new RefreshRequest(indices);
+        } else {
+            refreshRequest = new RefreshRequest();
+            refreshRequest.indices(indices);
+        }
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomIndicesOptions(refreshRequest::indicesOptions, refreshRequest::indicesOptions, expectedParams);
+        Request request = IndicesRequestConverters.refresh(refreshRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indices != null && indices.length > 0) {
+            endpoint.add(String.join(",", indices));
+        }
+        endpoint.add("_refresh");
+        Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getEntity(), nullValue());
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+    }
+
+    public void testFlush() {
+        String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+        FlushRequest flushRequest;
+        if (ESTestCase.randomBoolean()) {
+            flushRequest = new FlushRequest(indices);
+        } else {
+            flushRequest = new FlushRequest();
+            flushRequest.indices(indices);
+        }
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomIndicesOptions(flushRequest::indicesOptions, flushRequest::indicesOptions, expectedParams);
+        if (ESTestCase.randomBoolean()) {
+            flushRequest.force(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("force", Boolean.toString(flushRequest.force()));
+        if (ESTestCase.randomBoolean()) {
+            flushRequest.waitIfOngoing(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
+
+        Request request = IndicesRequestConverters.flush(flushRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indices != null && indices.length > 0) {
+            endpoint.add(String.join(",", indices));
+        }
+        endpoint.add("_flush");
+        Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getEntity(), nullValue());
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+    }
+
+    public void testSyncedFlush() {
+        String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+        SyncedFlushRequest syncedFlushRequest;
+        if (ESTestCase.randomBoolean()) {
+            syncedFlushRequest = new SyncedFlushRequest(indices);
+        } else {
+            syncedFlushRequest = new SyncedFlushRequest();
+            syncedFlushRequest.indices(indices);
+        }
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions,
+            expectedParams);
+        Request request = IndicesRequestConverters.flushSynced(syncedFlushRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indices != null && indices.length > 0) {
+            endpoint.add(String.join(",", indices));
+        }
+        endpoint.add("_flush/synced");
+        Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getEntity(), nullValue());
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+    }
+
+    public void testForceMerge() {
+        String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+        ForceMergeRequest forceMergeRequest;
+        if (ESTestCase.randomBoolean()) {
+            forceMergeRequest = new ForceMergeRequest(indices);
+        } else {
+            forceMergeRequest = new ForceMergeRequest();
+            forceMergeRequest.indices(indices);
+        }
+
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomIndicesOptions(forceMergeRequest::indicesOptions, forceMergeRequest::indicesOptions,
+            expectedParams);
+        if (ESTestCase.randomBoolean()) {
+            forceMergeRequest.maxNumSegments(ESTestCase.randomInt());
+        }
+        expectedParams.put("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments()));
+        if (ESTestCase.randomBoolean()) {
+            forceMergeRequest.onlyExpungeDeletes(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes()));
+        if (ESTestCase.randomBoolean()) {
+            forceMergeRequest.flush(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush()));
+
+        Request request = IndicesRequestConverters.forceMerge(forceMergeRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indices != null && indices.length > 0) {
+            endpoint.add(String.join(",", indices));
+        }
+        endpoint.add("_forcemerge");
+        Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getEntity(), nullValue());
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+    }
+
+    public void testClearCache() {
+        String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
+        ClearIndicesCacheRequest clearIndicesCacheRequest;
+        if (ESTestCase.randomBoolean()) {
+            clearIndicesCacheRequest = new ClearIndicesCacheRequest(indices);
+        } else {
+            clearIndicesCacheRequest = new ClearIndicesCacheRequest();
+            clearIndicesCacheRequest.indices(indices);
+        }
+        Map<String, String> expectedParams = new HashMap<>();
+        RequestConvertersTests.setRandomIndicesOptions(clearIndicesCacheRequest::indicesOptions, clearIndicesCacheRequest::indicesOptions,
+            expectedParams);
+        if (ESTestCase.randomBoolean()) {
+            clearIndicesCacheRequest.queryCache(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("query", Boolean.toString(clearIndicesCacheRequest.queryCache()));
+        if (ESTestCase.randomBoolean()) {
+            clearIndicesCacheRequest.fieldDataCache(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache()));
+        if (ESTestCase.randomBoolean()) {
+            clearIndicesCacheRequest.requestCache(ESTestCase.randomBoolean());
+        }
+        expectedParams.put("request", Boolean.toString(clearIndicesCacheRequest.requestCache()));
+        if (ESTestCase.randomBoolean()) {
+            clearIndicesCacheRequest.fields(RequestConvertersTests.randomIndicesNames(1, 5));
+            expectedParams.put("fields", String.join(",", clearIndicesCacheRequest.fields()));
+        }
+
+        Request request = IndicesRequestConverters.clearCache(clearIndicesCacheRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indices != null && indices.length > 0) {
+            endpoint.add(String.join(",", indices));
+        }
+        endpoint.add("_cache/clear");
+        Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
+        Assert.assertThat(request.getParameters(), equalTo(expectedParams));
+        Assert.assertThat(request.getEntity(), nullValue());
+        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+    }
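+
+    // The alias tests below must always provide at least one alias or one index, because the
+    // HEAD-based existsAlias converter rejects a request that specifies neither.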
public void testExistsAlias() { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); + getAliasesRequest.indices(indices); + // the HEAD endpoint requires at least an alias or an index + boolean hasIndices = indices != null && indices.length > 0; + String[] aliases; + if (hasIndices) { + aliases = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); + } else { + aliases = RequestConvertersTests.randomIndicesNames(1, 5); + } + getAliasesRequest.aliases(aliases); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomLocal(getAliasesRequest, expectedParams); + RequestConvertersTests.setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, + expectedParams); + + Request request = IndicesRequestConverters.existsAlias(getAliasesRequest); + StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + expectedEndpoint.add(String.join(",", indices)); + } + expectedEndpoint.add("_alias"); + if (aliases != null && aliases.length > 0) { + expectedEndpoint.add(String.join(",", aliases)); + } + Assert.assertEquals(HttpHead.METHOD_NAME, request.getMethod()); + Assert.assertEquals(expectedEndpoint.toString(), request.getEndpoint()); + Assert.assertEquals(expectedParams, request.getParameters()); + Assert.assertNull(request.getEntity()); + } + + public void testExistsAliasNoAliasNoIndex() { + { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, + () -> IndicesRequestConverters.existsAlias(getAliasesRequest)); + Assert.assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); + } + { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[]) null); + getAliasesRequest.indices((String[]) null); + IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, + () -> IndicesRequestConverters.existsAlias(getAliasesRequest)); + Assert.assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); + } + } + + public void testSplit() throws IOException { + resizeTest(ResizeType.SPLIT, IndicesRequestConverters::split); + } + + public void testSplitWrongResizeType() { + ResizeRequest resizeRequest = new ResizeRequest("target", "source"); + resizeRequest.setResizeType(ResizeType.SHRINK); + IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, () + -> IndicesRequestConverters.split(resizeRequest)); + Assert.assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage()); + } + + public void testShrinkWrongResizeType() { + ResizeRequest resizeRequest = new ResizeRequest("target", "source"); + resizeRequest.setResizeType(ResizeType.SPLIT); + IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, () + -> IndicesRequestConverters.shrink(resizeRequest)); + Assert.assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage()); + } + + public void testShrink() throws IOException { + resizeTest(ResizeType.SHRINK, IndicesRequestConverters::shrink); + } + + private void resizeTest(ResizeType resizeType, CheckedFunction function) + throws IOException { + String[] indices = RequestConvertersTests.randomIndicesNames(2, 2); + ResizeRequest resizeRequest = new 
ResizeRequest(indices[0], indices[1]); + resizeRequest.setResizeType(resizeType); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomMasterTimeout(resizeRequest, expectedParams); + RequestConvertersTests.setRandomTimeout(resizeRequest::timeout, resizeRequest.timeout(), expectedParams); + + if (ESTestCase.randomBoolean()) { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(ESTestCase.randomAlphaOfLengthBetween(3, 10)); + if (ESTestCase.randomBoolean()) { + createIndexRequest.settings(randomIndexSettings()); + } + if (ESTestCase.randomBoolean()) { + randomAliases(createIndexRequest); + } + resizeRequest.setTargetIndex(createIndexRequest); + } + RequestConvertersTests.setRandomWaitForActiveShards(resizeRequest::setWaitForActiveShards, expectedParams); + + Request request = function.apply(resizeRequest); + Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + String expectedEndpoint = "/" + resizeRequest.getSourceIndex() + "/_" + resizeType.name().toLowerCase(Locale.ROOT) + "/" + + resizeRequest.getTargetIndexRequest().index(); + Assert.assertEquals(expectedEndpoint, request.getEndpoint()); + Assert.assertEquals(expectedParams, request.getParameters()); + RequestConvertersTests.assertToXContentBody(resizeRequest, request.getEntity()); + } + + public void testRollover() throws IOException { + RolloverRequest rolloverRequest = new RolloverRequest(ESTestCase.randomAlphaOfLengthBetween(3, 10), + ESTestCase.randomBoolean() ? null : ESTestCase.randomAlphaOfLengthBetween(3, 10)); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomTimeout(rolloverRequest::timeout, rolloverRequest.timeout(), expectedParams); + RequestConvertersTests.setRandomMasterTimeout(rolloverRequest, expectedParams); + if (ESTestCase.randomBoolean()) { + rolloverRequest.dryRun(ESTestCase.randomBoolean()); + if (rolloverRequest.isDryRun()) { + expectedParams.put("dry_run", "true"); + } + } + if (ESTestCase.randomBoolean()) { + rolloverRequest.addMaxIndexAgeCondition(new TimeValue(ESTestCase.randomNonNegativeLong())); + } + if (ESTestCase.randomBoolean()) { + String type = ESTestCase.randomAlphaOfLengthBetween(3, 10); + rolloverRequest.getCreateIndexRequest().mapping(type, RandomCreateIndexGenerator.randomMapping(type)); + } + if (ESTestCase.randomBoolean()) { + RandomCreateIndexGenerator.randomAliases(rolloverRequest.getCreateIndexRequest()); + } + if (ESTestCase.randomBoolean()) { + rolloverRequest.getCreateIndexRequest().settings(RandomCreateIndexGenerator.randomIndexSettings()); + } + RequestConvertersTests.setRandomWaitForActiveShards(rolloverRequest.getCreateIndexRequest()::waitForActiveShards, expectedParams); + + Request request = IndicesRequestConverters.rollover(rolloverRequest); + if (rolloverRequest.getNewIndexName() == null) { + Assert.assertEquals("/" + rolloverRequest.getAlias() + "/_rollover", request.getEndpoint()); + } else { + Assert.assertEquals("/" + rolloverRequest.getAlias() + "/_rollover/" + rolloverRequest.getNewIndexName(), + request.getEndpoint()); + } + Assert.assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + RequestConvertersTests.assertToXContentBody(rolloverRequest, request.getEntity()); + Assert.assertEquals(expectedParams, request.getParameters()); + } + + public void testGetAlias() { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomLocal(getAliasesRequest, expectedParams); + 
RequestConvertersTests.setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, + expectedParams); + + String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 2); + String[] aliases = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 2); + getAliasesRequest.indices(indices); + getAliasesRequest.aliases(aliases); + + Request request = IndicesRequestConverters.getAlias(getAliasesRequest); + StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); + + if (false == CollectionUtils.isEmpty(indices)) { + expectedEndpoint.add(String.join(",", indices)); + } + expectedEndpoint.add("_alias"); + + if (false == CollectionUtils.isEmpty(aliases)) { + expectedEndpoint.add(String.join(",", aliases)); + } + + Assert.assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + Assert.assertEquals(expectedEndpoint.toString(), request.getEndpoint()); + Assert.assertEquals(expectedParams, request.getParameters()); + Assert.assertNull(request.getEntity()); + } + + public void testIndexPutSettings() throws IOException { + String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 2); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomMasterTimeout(updateSettingsRequest, expectedParams); + RequestConvertersTests.setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + RequestConvertersTests.setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, + expectedParams); + if (ESTestCase.randomBoolean()) { + updateSettingsRequest.setPreserveExisting(ESTestCase.randomBoolean()); + if (updateSettingsRequest.isPreserveExisting()) { + expectedParams.put("preserve_existing", "true"); + } + } + + Request request = IndicesRequestConverters.indexPutSettings(updateSettingsRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + endpoint.add("_settings"); + Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + RequestConvertersTests.assertToXContentBody(updateSettingsRequest, request.getEntity()); + Assert.assertEquals(expectedParams, request.getParameters()); + } + + public void testPutTemplateRequest() throws Exception { + Map names = new HashMap<>(); + names.put("log", "log"); + names.put("template#1", "template%231"); + names.put("-#template", "-%23template"); + names.put("foo^bar", "foo%5Ebar"); + + PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(ESTestCase.randomFrom(names.keySet())) + .patterns(Arrays.asList(ESTestCase.generateRandomStringArray(20, 100, false, false))); + if (ESTestCase.randomBoolean()) { + putTemplateRequest.order(ESTestCase.randomInt()); + } + if (ESTestCase.randomBoolean()) { + putTemplateRequest.version(ESTestCase.randomInt()); + } + if (ESTestCase.randomBoolean()) { + putTemplateRequest.settings(Settings.builder().put("setting-" + ESTestCase.randomInt(), ESTestCase.randomTimeValue())); + } + if (ESTestCase.randomBoolean()) { + putTemplateRequest.mapping("doc-" + ESTestCase.randomInt(), + "field-" + ESTestCase.randomInt(), "type=" + ESTestCase.randomFrom("text", "keyword")); + } + if (ESTestCase.randomBoolean()) { + 
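+ // sometimes attach a random alias so the serialized template body is exercised with aliases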
putTemplateRequest.alias(new Alias("alias-" + ESTestCase.randomInt())); + } + Map expectedParams = new HashMap<>(); + if (ESTestCase.randomBoolean()) { + expectedParams.put("create", Boolean.TRUE.toString()); + putTemplateRequest.create(true); + } + if (ESTestCase.randomBoolean()) { + String cause = ESTestCase.randomUnicodeOfCodepointLengthBetween(1, 50); + putTemplateRequest.cause(cause); + expectedParams.put("cause", cause); + } + RequestConvertersTests.setRandomMasterTimeout(putTemplateRequest, expectedParams); + Request request = IndicesRequestConverters.putTemplate(putTemplateRequest); + Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + names.get(putTemplateRequest.name()))); + Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + RequestConvertersTests.assertToXContentBody(putTemplateRequest, request.getEntity()); + } + + public void testValidateQuery() throws Exception { + String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); + String[] types = ESTestCase.randomBoolean() ? ESTestCase.generateRandomStringArray(5, 5, false, false) : null; + ValidateQueryRequest validateQueryRequest; + if (ESTestCase.randomBoolean()) { + validateQueryRequest = new ValidateQueryRequest(indices); + } else { + validateQueryRequest = new ValidateQueryRequest(); + validateQueryRequest.indices(indices); + } + validateQueryRequest.types(types); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomIndicesOptions(validateQueryRequest::indicesOptions, validateQueryRequest::indicesOptions, + expectedParams); + validateQueryRequest.explain(ESTestCase.randomBoolean()); + validateQueryRequest.rewrite(ESTestCase.randomBoolean()); + validateQueryRequest.allShards(ESTestCase.randomBoolean()); + expectedParams.put("explain", Boolean.toString(validateQueryRequest.explain())); + expectedParams.put("rewrite", Boolean.toString(validateQueryRequest.rewrite())); + expectedParams.put("all_shards", Boolean.toString(validateQueryRequest.allShards())); + Request request = IndicesRequestConverters.validateQuery(validateQueryRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + if (types != null && types.length > 0) { + endpoint.add(String.join(",", types)); + } + } + endpoint.add("_validate/query"); + Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + RequestConvertersTests.assertToXContentBody(validateQueryRequest, request.getEntity()); + Assert.assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + } + + public void testGetTemplateRequest() throws Exception { + Map encodes = new HashMap<>(); + encodes.put("log", "log"); + encodes.put("1", "1"); + encodes.put("template#1", "template%231"); + encodes.put("template-*", "template-*"); + encodes.put("foo^bar", "foo%5Ebar"); + List names = ESTestCase.randomSubsetOf(1, encodes.keySet()); + GetIndexTemplatesRequest getTemplatesRequest = new GetIndexTemplatesRequest().names(names.toArray(new String[0])); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomMasterTimeout(getTemplatesRequest, expectedParams); + RequestConvertersTests.setRandomLocal(getTemplatesRequest, expectedParams); + Request request = IndicesRequestConverters.getTemplates(getTemplatesRequest); + Assert.assertThat(request.getEndpoint(), + equalTo("/_template/" + 
names.stream().map(encodes::get).collect(Collectors.joining(",")))); + Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + Assert.assertThat(request.getEntity(), nullValue()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 4ef8e8542c95e..6d073a7a60a8f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -27,37 +27,12 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.util.EntityUtils; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeType; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -81,22 +56,18 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.RequestConverters.EndpointBuilder; import org.elasticsearch.common.CheckedBiConsumer; -import org.elasticsearch.common.CheckedFunction; import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; @@ -141,16 +112,11 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.RequestConverters.enforceSameContentType; -import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases; -import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest; -import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings; -import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; @@ -261,30 +227,6 @@ public void testExists() { getAndExistsTest(RequestConverters::exists, HttpHead.METHOD_NAME); } - public void testIndicesExist() { - String[] indices = randomIndicesNames(1, 10); - - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indices); - - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams); - setRandomLocal(getIndexRequest, expectedParams); - setRandomHumanReadable(getIndexRequest, expectedParams); - setRandomIncludeDefaults(getIndexRequest, expectedParams); - - final Request request = RequestConverters.indicesExist(getIndexRequest); - - assertEquals(HttpHead.METHOD_NAME, request.getMethod()); - assertEquals("/" + String.join(",", indices), request.getEndpoint()); - assertThat(expectedParams, equalTo(request.getParameters())); - assertNull(request.getEntity()); - } - - public void testIndicesExistEmptyIndices() { - expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest())); - expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[]) null))); - } - private static void getAndExistsTest(Function requestConverter, String method) { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -343,41 +285,6 @@ private static void getAndExistsTest(Function requestConver assertEquals(method, request.getMethod()); } - public void testCreateIndex() throws IOException { - 
CreateIndexRequest createIndexRequest = randomCreateIndexRequest(); - - Map expectedParams = new HashMap<>(); - setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(createIndexRequest, expectedParams); - setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams); - - Request request = RequestConverters.createIndex(createIndexRequest); - assertEquals("/" + createIndexRequest.index(), request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertToXContentBody(createIndexRequest, request.getEntity()); - } - - public void testCreateIndexNullIndex() { - ActionRequestValidationException validationException = new CreateIndexRequest(null).validate(); - assertNotNull(validationException); - } - - public void testUpdateAliases() throws IOException { - IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - AliasActions aliasAction = randomAliasAction(); - indicesAliasesRequest.addAliasAction(aliasAction); - - Map expectedParams = new HashMap<>(); - setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(indicesAliasesRequest, expectedParams); - - Request request = RequestConverters.updateAliases(indicesAliasesRequest); - assertEquals("/_aliases", request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertToXContentBody(indicesAliasesRequest, request.getEntity()); - } - public void testReindex() throws IOException { ReindexRequest reindexRequest = new ReindexRequest(); reindexRequest.setSourceIndices("source_idx"); @@ -537,282 +444,6 @@ public void testDeleteByQuery() throws IOException { assertToXContentBody(deleteByQueryRequest, request.getEntity()); } - public void testPutMapping() throws IOException { - PutMappingRequest putMappingRequest = new PutMappingRequest(); - - String[] indices = randomIndicesNames(0, 5); - putMappingRequest.indices(indices); - - String type = randomAlphaOfLengthBetween(3, 10); - putMappingRequest.type(type); - - Map expectedParams = new HashMap<>(); - - setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(putMappingRequest, expectedParams); - - Request request = RequestConverters.putMapping(putMappingRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - String index = String.join(",", indices); - if (Strings.hasLength(index)) { - endpoint.add(index); - } - endpoint.add("_mapping"); - endpoint.add(type); - assertEquals(endpoint.toString(), request.getEndpoint()); - - assertEquals(expectedParams, request.getParameters()); - assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertToXContentBody(putMappingRequest, request.getEntity()); - } - - public void testGetMapping() throws IOException { - GetMappingsRequest getMappingRequest = new GetMappingsRequest(); - - String[] indices = Strings.EMPTY_ARRAY; - if (randomBoolean()) { - indices = randomIndicesNames(0, 5); - getMappingRequest.indices(indices); - } else if (randomBoolean()) { - getMappingRequest.indices((String[]) null); - } - - String type = null; - if (randomBoolean()) { - type = randomAlphaOfLengthBetween(3, 10); - getMappingRequest.types(type); - } else if (randomBoolean()) { - getMappingRequest.types((String[]) null); - } - - Map expectedParams = new HashMap<>(); - - 
setRandomIndicesOptions(getMappingRequest::indicesOptions, getMappingRequest::indicesOptions, expectedParams); - setRandomMasterTimeout(getMappingRequest, expectedParams); - setRandomLocal(getMappingRequest, expectedParams); - - Request request = RequestConverters.getMappings(getMappingRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - String index = String.join(",", indices); - if (Strings.hasLength(index)) { - endpoint.add(index); - } - endpoint.add("_mapping"); - if (type != null) { - endpoint.add(type); - } - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - - assertThat(expectedParams, equalTo(request.getParameters())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - } - - public void testGetFieldMapping() throws IOException { - GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest(); - - String[] indices = Strings.EMPTY_ARRAY; - if (randomBoolean()) { - indices = randomIndicesNames(0, 5); - getFieldMappingsRequest.indices(indices); - } else if (randomBoolean()) { - getFieldMappingsRequest.indices((String[]) null); - } - - String type = null; - if (randomBoolean()) { - type = randomAlphaOfLengthBetween(3, 10); - getFieldMappingsRequest.types(type); - } else if (randomBoolean()) { - getFieldMappingsRequest.types((String[]) null); - } - - String[] fields = null; - if (randomBoolean()) { - fields = new String[randomIntBetween(1, 5)]; - for (int i = 0; i < fields.length; i++) { - fields[i] = randomAlphaOfLengthBetween(3, 10); - } - getFieldMappingsRequest.fields(fields); - } else if (randomBoolean()) { - getFieldMappingsRequest.fields((String[]) null); - } - - Map expectedParams = new HashMap<>(); - - setRandomIndicesOptions(getFieldMappingsRequest::indicesOptions, getFieldMappingsRequest::indicesOptions, expectedParams); - setRandomLocal(getFieldMappingsRequest::local, expectedParams); - - Request request = RequestConverters.getFieldMapping(getFieldMappingsRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - String index = String.join(",", indices); - if (Strings.hasLength(index)) { - endpoint.add(index); - } - endpoint.add("_mapping"); - if (type != null) { - endpoint.add(type); - } - endpoint.add("field"); - if (fields != null) { - endpoint.add(String.join(",", fields)); - } - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - - assertThat(expectedParams, equalTo(request.getParameters())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - } - - public void testDeleteIndex() { - String[] indices = randomIndicesNames(0, 5); - DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices); - - Map expectedParams = new HashMap<>(); - setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(deleteIndexRequest, expectedParams); - - setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions, expectedParams); - - Request request = RequestConverters.deleteIndex(deleteIndexRequest); - assertEquals("/" + String.join(",", indices), request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); - assertNull(request.getEntity()); - } - - public void testGetSettings() throws IOException { - String[] indicesUnderTest = randomBoolean() ? 
null : randomIndicesNames(0, 5); - - GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(indicesUnderTest); - - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(getSettingsRequest, expectedParams); - setRandomIndicesOptions(getSettingsRequest::indicesOptions, getSettingsRequest::indicesOptions, expectedParams); - - setRandomLocal(getSettingsRequest, expectedParams); - - if (randomBoolean()) { - // the request object will not have include_defaults present unless it is set to - // true - getSettingsRequest.includeDefaults(randomBoolean()); - if (getSettingsRequest.includeDefaults()) { - expectedParams.put("include_defaults", Boolean.toString(true)); - } - } - - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indicesUnderTest != null && indicesUnderTest.length > 0) { - endpoint.add(String.join(",", indicesUnderTest)); - } - endpoint.add("_settings"); - - if (randomBoolean()) { - String[] names = randomBoolean() ? null : new String[randomIntBetween(0, 3)]; - if (names != null) { - for (int x = 0; x < names.length; x++) { - names[x] = randomAlphaOfLengthBetween(3, 10); - } - } - getSettingsRequest.names(names); - if (names != null && names.length > 0) { - endpoint.add(String.join(",", names)); - } - } - - Request request = RequestConverters.getSettings(getSettingsRequest); - - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); - assertThat(request.getEntity(), nullValue()); - } - - public void testGetIndex() throws IOException { - String[] indicesUnderTest = randomBoolean() ? null : randomIndicesNames(0, 5); - - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indicesUnderTest); - - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(getIndexRequest, expectedParams); - setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams); - setRandomLocal(getIndexRequest, expectedParams); - setRandomHumanReadable(getIndexRequest, expectedParams); - - if (randomBoolean()) { - // the request object will not have include_defaults present unless it is set to - // true - getIndexRequest.includeDefaults(randomBoolean()); - if (getIndexRequest.includeDefaults()) { - expectedParams.put("include_defaults", Boolean.toString(true)); - } - } - - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indicesUnderTest != null && indicesUnderTest.length > 0) { - endpoint.add(String.join(",", indicesUnderTest)); - } - - Request request = RequestConverters.getIndex(getIndexRequest); - - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); - assertThat(request.getEntity(), nullValue()); - } - - public void testDeleteIndexEmptyIndices() { - String[] indices = randomBoolean() ? 
null : Strings.EMPTY_ARRAY; - ActionRequestValidationException validationException = new DeleteIndexRequest(indices).validate(); - assertNotNull(validationException); - } - - public void testOpenIndex() { - String[] indices = randomIndicesNames(1, 5); - OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); - openIndexRequest.indices(indices); - - Map expectedParams = new HashMap<>(); - setRandomTimeout(openIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(openIndexRequest, expectedParams); - setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams); - setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams); - - Request request = RequestConverters.openIndex(openIndexRequest); - StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open"); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - assertThat(request.getEntity(), nullValue()); - } - - public void testOpenIndexEmptyIndices() { - String[] indices = randomBoolean() ? null : Strings.EMPTY_ARRAY; - ActionRequestValidationException validationException = new OpenIndexRequest(indices).validate(); - assertNotNull(validationException); - } - - public void testCloseIndex() { - String[] indices = randomIndicesNames(1, 5); - CloseIndexRequest closeIndexRequest = new CloseIndexRequest(indices); - - Map expectedParams = new HashMap<>(); - setRandomTimeout(closeIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(closeIndexRequest, expectedParams); - setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams); - - Request request = RequestConverters.closeIndex(closeIndexRequest); - StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close"); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(expectedParams, equalTo(request.getParameters())); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - assertThat(request.getEntity(), nullValue()); - } - - public void testCloseIndexEmptyIndices() { - String[] indices = randomBoolean() ? null : Strings.EMPTY_ARRAY; - ActionRequestValidationException validationException = new CloseIndexRequest(indices).validate(); - assertNotNull(validationException); - } - public void testIndex() throws IOException { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -886,161 +517,6 @@ public void testIndex() throws IOException { } } - public void testRefresh() { - String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); - RefreshRequest refreshRequest; - if (randomBoolean()) { - refreshRequest = new RefreshRequest(indices); - } else { - refreshRequest = new RefreshRequest(); - refreshRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(refreshRequest::indicesOptions, refreshRequest::indicesOptions, expectedParams); - Request request = RequestConverters.refresh(refreshRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_refresh"); - assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), nullValue()); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - - public void testFlush() { - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); - FlushRequest flushRequest; - if (randomBoolean()) { - flushRequest = new FlushRequest(indices); - } else { - flushRequest = new FlushRequest(); - flushRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(flushRequest::indicesOptions, flushRequest::indicesOptions, expectedParams); - if (randomBoolean()) { - flushRequest.force(randomBoolean()); - } - expectedParams.put("force", Boolean.toString(flushRequest.force())); - if (randomBoolean()) { - flushRequest.waitIfOngoing(randomBoolean()); - } - expectedParams.put("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing())); - - Request request = RequestConverters.flush(flushRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_flush"); - assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), nullValue()); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - - public void testSyncedFlush() { - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); - SyncedFlushRequest syncedFlushRequest; - if (randomBoolean()) { - syncedFlushRequest = new SyncedFlushRequest(indices); - } else { - syncedFlushRequest = new SyncedFlushRequest(); - syncedFlushRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions, expectedParams); - Request request = RequestConverters.flushSynced(syncedFlushRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_flush/synced"); - assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), nullValue()); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - - public void testForceMerge() { - String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); - ForceMergeRequest forceMergeRequest; - if (randomBoolean()) { - forceMergeRequest = new ForceMergeRequest(indices); - } else { - forceMergeRequest = new ForceMergeRequest(); - forceMergeRequest.indices(indices); - } - - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(forceMergeRequest::indicesOptions, forceMergeRequest::indicesOptions, expectedParams); - if (randomBoolean()) { - forceMergeRequest.maxNumSegments(randomInt()); - } - expectedParams.put("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); - if (randomBoolean()) { - forceMergeRequest.onlyExpungeDeletes(randomBoolean()); - } - expectedParams.put("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); - if (randomBoolean()) { - forceMergeRequest.flush(randomBoolean()); - } - expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush())); - - Request request = RequestConverters.forceMerge(forceMergeRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_forcemerge"); - assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), nullValue()); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - - public void testClearCache() { - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); - ClearIndicesCacheRequest clearIndicesCacheRequest; - if (randomBoolean()) { - clearIndicesCacheRequest = new ClearIndicesCacheRequest(indices); - } else { - clearIndicesCacheRequest = new ClearIndicesCacheRequest(); - clearIndicesCacheRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(clearIndicesCacheRequest::indicesOptions, clearIndicesCacheRequest::indicesOptions, expectedParams); - if (randomBoolean()) { - clearIndicesCacheRequest.queryCache(randomBoolean()); - } - expectedParams.put("query", Boolean.toString(clearIndicesCacheRequest.queryCache())); - if (randomBoolean()) { - clearIndicesCacheRequest.fieldDataCache(randomBoolean()); - } - expectedParams.put("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache())); - if (randomBoolean()) { - clearIndicesCacheRequest.requestCache(randomBoolean()); - } - expectedParams.put("request", Boolean.toString(clearIndicesCacheRequest.requestCache())); - if (randomBoolean()) { - clearIndicesCacheRequest.fields(randomIndicesNames(1, 5)); - expectedParams.put("fields", String.join(",", clearIndicesCacheRequest.fields())); - } - - Request request = RequestConverters.clearCache(clearIndicesCacheRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_cache/clear"); - assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), nullValue()); - assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - public void testUpdate() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); @@ -1598,54 +1074,6 @@ public void testMultiSearchTemplate() throws Exception { assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); } - public void testExistsAlias() { - GetAliasesRequest getAliasesRequest = 
new GetAliasesRequest(); - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); - getAliasesRequest.indices(indices); - // the HEAD endpoint requires at least an alias or an index - boolean hasIndices = indices != null && indices.length > 0; - String[] aliases; - if (hasIndices) { - aliases = randomBoolean() ? null : randomIndicesNames(0, 5); - } else { - aliases = randomIndicesNames(1, 5); - } - getAliasesRequest.aliases(aliases); - Map expectedParams = new HashMap<>(); - setRandomLocal(getAliasesRequest, expectedParams); - setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams); - - Request request = RequestConverters.existsAlias(getAliasesRequest); - StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - expectedEndpoint.add(String.join(",", indices)); - } - expectedEndpoint.add("_alias"); - if (aliases != null && aliases.length > 0) { - expectedEndpoint.add(String.join(",", aliases)); - } - assertEquals(HttpHead.METHOD_NAME, request.getMethod()); - assertEquals(expectedEndpoint.toString(), request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertNull(request.getEntity()); - } - - public void testExistsAliasNoAliasNoIndex() { - { - GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, - () -> RequestConverters.existsAlias(getAliasesRequest)); - assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); - } - { - GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[]) null); - getAliasesRequest.indices((String[]) null); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, - () -> RequestConverters.existsAlias(getAliasesRequest)); - assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); - } - } - public void testExplain() throws IOException { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -1749,245 +1177,6 @@ public void testRankEval() throws Exception { assertToXContentBody(spec, request.getEntity()); } - public void testSplit() throws IOException { - resizeTest(ResizeType.SPLIT, RequestConverters::split); - } - - public void testSplitWrongResizeType() { - ResizeRequest resizeRequest = new ResizeRequest("target", "source"); - resizeRequest.setResizeType(ResizeType.SHRINK); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.split(resizeRequest)); - assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage()); - } - - public void testShrinkWrongResizeType() { - ResizeRequest resizeRequest = new ResizeRequest("target", "source"); - resizeRequest.setResizeType(ResizeType.SPLIT); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.shrink(resizeRequest)); - assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage()); - } - - public void testShrink() throws IOException { - resizeTest(ResizeType.SHRINK, RequestConverters::shrink); - } - - private static void resizeTest(ResizeType resizeType, CheckedFunction function) - throws IOException { - String[] indices = randomIndicesNames(2, 2); - ResizeRequest resizeRequest = new ResizeRequest(indices[0], indices[1]); - resizeRequest.setResizeType(resizeType); - Map expectedParams = new HashMap<>(); - 
setRandomMasterTimeout(resizeRequest, expectedParams); - setRandomTimeout(resizeRequest::timeout, resizeRequest.timeout(), expectedParams); - - if (randomBoolean()) { - CreateIndexRequest createIndexRequest = new CreateIndexRequest(randomAlphaOfLengthBetween(3, 10)); - if (randomBoolean()) { - createIndexRequest.settings(randomIndexSettings()); - } - if (randomBoolean()) { - randomAliases(createIndexRequest); - } - resizeRequest.setTargetIndex(createIndexRequest); - } - setRandomWaitForActiveShards(resizeRequest::setWaitForActiveShards, expectedParams); - - Request request = function.apply(resizeRequest); - assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - String expectedEndpoint = "/" + resizeRequest.getSourceIndex() + "/_" + resizeType.name().toLowerCase(Locale.ROOT) + "/" - + resizeRequest.getTargetIndexRequest().index(); - assertEquals(expectedEndpoint, request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertToXContentBody(resizeRequest, request.getEntity()); - } - - public void testRollover() throws IOException { - RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10), - randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10)); - Map expectedParams = new HashMap<>(); - setRandomTimeout(rolloverRequest::timeout, rolloverRequest.timeout(), expectedParams); - setRandomMasterTimeout(rolloverRequest, expectedParams); - if (randomBoolean()) { - rolloverRequest.dryRun(randomBoolean()); - if (rolloverRequest.isDryRun()) { - expectedParams.put("dry_run", "true"); - } - } - if (randomBoolean()) { - rolloverRequest.addMaxIndexAgeCondition(new TimeValue(randomNonNegativeLong())); - } - if (randomBoolean()) { - String type = randomAlphaOfLengthBetween(3, 10); - rolloverRequest.getCreateIndexRequest().mapping(type, RandomCreateIndexGenerator.randomMapping(type)); - } - if (randomBoolean()) { - RandomCreateIndexGenerator.randomAliases(rolloverRequest.getCreateIndexRequest()); - } - if (randomBoolean()) { - rolloverRequest.getCreateIndexRequest().settings(RandomCreateIndexGenerator.randomIndexSettings()); - } - setRandomWaitForActiveShards(rolloverRequest.getCreateIndexRequest()::waitForActiveShards, expectedParams); - - Request request = RequestConverters.rollover(rolloverRequest); - if (rolloverRequest.getNewIndexName() == null) { - assertEquals("/" + rolloverRequest.getAlias() + "/_rollover", request.getEndpoint()); - } else { - assertEquals("/" + rolloverRequest.getAlias() + "/_rollover/" + rolloverRequest.getNewIndexName(), request.getEndpoint()); - } - assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertToXContentBody(rolloverRequest, request.getEntity()); - assertEquals(expectedParams, request.getParameters()); - } - - public void testGetAlias() { - GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); - - Map expectedParams = new HashMap<>(); - setRandomLocal(getAliasesRequest, expectedParams); - setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams); - - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2); - String[] aliases = randomBoolean() ? 
null : randomIndicesNames(0, 2); - getAliasesRequest.indices(indices); - getAliasesRequest.aliases(aliases); - - Request request = RequestConverters.getAlias(getAliasesRequest); - StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); - - if (false == CollectionUtils.isEmpty(indices)) { - expectedEndpoint.add(String.join(",", indices)); - } - expectedEndpoint.add("_alias"); - - if (false == CollectionUtils.isEmpty(aliases)) { - expectedEndpoint.add(String.join(",", aliases)); - } - - assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertEquals(expectedEndpoint.toString(), request.getEndpoint()); - assertEquals(expectedParams, request.getParameters()); - assertNull(request.getEntity()); - } - - public void testIndexPutSettings() throws IOException { - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2); - UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(updateSettingsRequest, expectedParams); - setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams); - if (randomBoolean()) { - updateSettingsRequest.setPreserveExisting(randomBoolean()); - if (updateSettingsRequest.isPreserveExisting()) { - expectedParams.put("preserve_existing", "true"); - } - } - - Request request = RequestConverters.indexPutSettings(updateSettingsRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_settings"); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertToXContentBody(updateSettingsRequest, request.getEntity()); - assertEquals(expectedParams, request.getParameters()); - } - - public void testPutTemplateRequest() throws Exception { - Map names = new HashMap<>(); - names.put("log", "log"); - names.put("template#1", "template%231"); - names.put("-#template", "-%23template"); - names.put("foo^bar", "foo%5Ebar"); - - PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(randomFrom(names.keySet())) - .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); - if (randomBoolean()) { - putTemplateRequest.order(randomInt()); - } - if (randomBoolean()) { - putTemplateRequest.version(randomInt()); - } - if (randomBoolean()) { - putTemplateRequest.settings(Settings.builder().put("setting-" + randomInt(), randomTimeValue())); - } - if (randomBoolean()) { - putTemplateRequest.mapping("doc-" + randomInt(), "field-" + randomInt(), "type=" + randomFrom("text", "keyword")); - } - if (randomBoolean()) { - putTemplateRequest.alias(new Alias("alias-" + randomInt())); - } - Map expectedParams = new HashMap<>(); - if (randomBoolean()) { - expectedParams.put("create", Boolean.TRUE.toString()); - putTemplateRequest.create(true); - } - if (randomBoolean()) { - String cause = randomUnicodeOfCodepointLengthBetween(1, 50); - putTemplateRequest.cause(cause); - expectedParams.put("cause", cause); - } - setRandomMasterTimeout(putTemplateRequest, expectedParams); - Request request = RequestConverters.putTemplate(putTemplateRequest); - assertThat(request.getEndpoint(), equalTo("/_template/" + names.get(putTemplateRequest.name()))); - assertThat(request.getParameters(), equalTo(expectedParams)); - 
assertToXContentBody(putTemplateRequest, request.getEntity()); - } - - public void testValidateQuery() throws Exception { - String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); - String[] types = randomBoolean() ? generateRandomStringArray(5, 5, false, false) : null; - ValidateQueryRequest validateQueryRequest; - if (randomBoolean()) { - validateQueryRequest = new ValidateQueryRequest(indices); - } else { - validateQueryRequest = new ValidateQueryRequest(); - validateQueryRequest.indices(indices); - } - validateQueryRequest.types(types); - Map expectedParams = new HashMap<>(); - setRandomIndicesOptions(validateQueryRequest::indicesOptions, validateQueryRequest::indicesOptions, expectedParams); - validateQueryRequest.explain(randomBoolean()); - validateQueryRequest.rewrite(randomBoolean()); - validateQueryRequest.allShards(randomBoolean()); - expectedParams.put("explain", Boolean.toString(validateQueryRequest.explain())); - expectedParams.put("rewrite", Boolean.toString(validateQueryRequest.rewrite())); - expectedParams.put("all_shards", Boolean.toString(validateQueryRequest.allShards())); - Request request = RequestConverters.validateQuery(validateQueryRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - if (types != null && types.length > 0) { - endpoint.add(String.join(",", types)); - } - } - endpoint.add("_validate/query"); - assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertToXContentBody(validateQueryRequest, request.getEntity()); - assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); - } - - public void testGetTemplateRequest() throws Exception { - Map encodes = new HashMap<>(); - encodes.put("log", "log"); - encodes.put("1", "1"); - encodes.put("template#1", "template%231"); - encodes.put("template-*", "template-*"); - encodes.put("foo^bar", "foo%5Ebar"); - List names = randomSubsetOf(1, encodes.keySet()); - GetIndexTemplatesRequest getTemplatesRequest = new GetIndexTemplatesRequest().names(names.toArray(new String[0])); - Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(getTemplatesRequest, expectedParams); - setRandomLocal(getTemplatesRequest, expectedParams); - Request request = RequestConverters.getTemplates(getTemplatesRequest); - assertThat(request.getEndpoint(), equalTo("/_template/" + names.stream().map(encodes::get).collect(Collectors.joining(",")))); - assertThat(request.getParameters(), equalTo(expectedParams)); - assertThat(request.getEntity(), nullValue()); - } - public void testPutScript() throws Exception { PutStoredScriptRequest putStoredScriptRequest = new PutStoredScriptRequest(); @@ -2289,7 +1478,7 @@ static void setRandomIndicesOptions(Consumer setter, Supplier expectedParams) { + static void setRandomIncludeDefaults(GetIndexRequest request, Map expectedParams) { if (randomBoolean()) { boolean includeDefaults = randomBoolean(); request.includeDefaults(includeDefaults); @@ -2299,7 +1488,7 @@ private static void setRandomIncludeDefaults(GetIndexRequest request, Map expectedParams) { + static void setRandomHumanReadable(GetIndexRequest request, Map expectedParams) { if (randomBoolean()) { boolean humanReadable = randomBoolean(); request.humanReadable(humanReadable); @@ -2309,7 +1498,7 @@ private static void setRandomHumanReadable(GetIndexRequest request, Map setter, Map expectedParams) { + static void setRandomLocal(Consumer setter, 
Map expectedParams) { if (randomBoolean()) { boolean local = randomBoolean(); setter.accept(local); @@ -2343,7 +1532,7 @@ static void setRandomMasterTimeout(MasterNodeRequest request, Map setter, Map expectedParams) { + static void setRandomWaitForActiveShards(Consumer setter, Map expectedParams) { setRandomWaitForActiveShards(setter, ActiveShardCount.DEFAULT, expectedParams); } From 012b9c7539471ecde25515cf49b12846af0bc9e0 Mon Sep 17 00:00:00 2001 From: ben5556 <39107453+ben5556@users.noreply.github.com> Date: Tue, 18 Sep 2018 13:21:15 +1200 Subject: [PATCH 04/46] Corrected aggregation name to match the example (#33786) --- docs/reference/aggregations/metrics/sum-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/metrics/sum-aggregation.asciidoc b/docs/reference/aggregations/metrics/sum-aggregation.asciidoc index 55c1c3f80fac7..8825f07952155 100644 --- a/docs/reference/aggregations/metrics/sum-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/sum-aggregation.asciidoc @@ -40,7 +40,7 @@ Resulting in: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -The name of the aggregation (`intraday_return` above) also serves as the key by which the aggregation result can be retrieved from the returned response. +The name of the aggregation (`hat_prices` above) also serves as the key by which the aggregation result can be retrieved from the returned response. ==== Script From a95226bdaeaf13347d1bbd2761e3eeaead47c145 Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Mon, 17 Sep 2018 18:29:30 -0700 Subject: [PATCH 05/46] [Monitoring] Removing unused version.* fields (#33584) This PR removes fields that are not actually used by the Monitoring UI. This will greatly simplify the eventual migration to using Metricbeat for monitoring Elasticsearch (see https://github.com/elastic/beats/pull/8260#discussion_r215885868 for more context and discussion around removing these fields from ES collection). 
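As a rough sketch of what consumers can still rely on after this change: the `index_stats` object keeps only its top-level fields. The class and values below are hypothetical and purely illustrative; the field names are taken from the diff that follows.

```
import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical illustration only: mirrors the index_stats fields that remain
// after version.created/version.upgraded are removed.
public class IndexStatsShapeSketch {
    public static void main(String[] args) {
        Map<String, Object> indexStats = new LinkedHashMap<>();
        indexStats.put("index", "my-index");              // illustrative index name
        indexStats.put("uuid", "fCdgVX9qQ9WdZGOMhx_Pcw"); // illustrative UUID
        indexStats.put("created", 1537225200000L);        // creation date in epoch millis
        indexStats.put("status", "green");
        // the nested "version" object is gone, so lookups for it now miss:
        System.out.println(indexStats.containsKey("version")); // prints false
    }
}
```

The shards/primaries/replicas counters shown in the diff below are untouched; only the version pair goes away.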
--- .../collector/indices/IndexStatsMonitoringDoc.java | 9 --------- .../collector/indices/IndexStatsMonitoringDocTests.java | 4 ---- .../xpack/monitoring/integration/MonitoringIT.java | 2 +- 3 files changed, 1 insertion(+), 14 deletions(-) diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java index f90abb1639d5c..c72e645dfd04f 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java @@ -69,13 +69,6 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO builder.field("created", metaData.getCreationDate()); builder.field("status", health.getStatus().name().toLowerCase(Locale.ROOT)); - builder.startObject("version"); - { - builder.field("created", metaData.getCreationVersion()); - builder.field("upgraded", metaData.getUpgradedVersion()); - } - builder.endObject(); - builder.startObject("shards"); { final int total = metaData.getTotalNumberOfShards(); @@ -128,8 +121,6 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO "index_stats.uuid", "index_stats.created", "index_stats.status", - "index_stats.version.created", - "index_stats.version.upgraded", "index_stats.shards.total", "index_stats.shards.primaries", "index_stats.shards.replicas", diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java index 278af123625e7..da9063507daa0 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java @@ -290,10 +290,6 @@ private String indexStatsSummary() { "\"uuid\":\"" + index.getUUID() + "\"," + "\"created\":" + metaData.getCreationDate() + "," + "\"status\":\"" + indexHealth.getStatus().name().toLowerCase(Locale.ROOT) + "\"," + - "\"version\":{" + - "\"created\":\"" + metaData.getCreationVersion() + "\"," + - "\"upgraded\":\"" + metaData.getUpgradedVersion() + "\"" + - "}," + "\"shards\":{" + "\"total\":" + total + "," + "\"primaries\":" + primaries + "," + diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index 77a70f5da57e3..a6f9a14f28b63 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -417,7 +417,7 @@ private void assertIndexStatsMonitoringDoc(final Map document) { // particular field values checked in the index stats tests final Map indexStats = (Map) source.get(IndexStatsMonitoringDoc.TYPE); - assertEquals(8, indexStats.size()); + assertEquals(7, indexStats.size()); assertThat((String) indexStats.get("index"), not(isEmptyOrNullString())); assertThat((String) 
indexStats.get("uuid"), not(isEmptyOrNullString())); assertThat(indexStats.get("created"), notNullValue()); From 2aba52de8f9315b0e384e1c657d7b0401d26a1b0 Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Mon, 17 Sep 2018 18:33:43 -0700 Subject: [PATCH 06/46] Implement xpack.monitoring.elasticsearch.collection.enabled setting (#33474) * Implement xpack.monitoring.elasticsearch.collection.enabled setting * Fixing line lengths * Updating constructor calls in test * Removing unused import * Fixing line lengths in test classes * Make monitoringService.isElasticsearchCollectionEnabled() return true for tests * Remove wrong expectation * Adding unit tests for new flag to be false * Fixing line wrapping/indentation for better readability * Adding docs * Fixing logic in ClusterStatsCollector::shouldCollect * Rebasing with master and resolving conflicts * Simplifying implementation by gating scheduling * Doc fixes / improvements * Making methods package private * Fixing wording * Fixing method access --- .../configuring-monitoring.asciidoc | 11 ++++-- .../monitoring/pause-export.asciidoc | 10 +++++ .../settings/monitoring-settings.asciidoc | 11 ++++++ .../xpack/monitoring/Monitoring.java | 1 + .../xpack/monitoring/MonitoringService.java | 38 +++++++++++++++++-- .../indices/IndexRecoveryCollectorTests.java | 2 +- 6 files changed, 64 insertions(+), 9 deletions(-) diff --git a/docs/reference/monitoring/configuring-monitoring.asciidoc b/docs/reference/monitoring/configuring-monitoring.asciidoc index 3bcfef2acbf29..6708b791036a9 100644 --- a/docs/reference/monitoring/configuring-monitoring.asciidoc +++ b/docs/reference/monitoring/configuring-monitoring.asciidoc @@ -13,10 +13,13 @@ indices. You can also adjust how monitoring data is displayed. . To collect monitoring data about your {es} cluster: -.. Verify that the `xpack.monitoring.enabled` and -`xpack.monitoring.collection.enabled` settings are `true` on each node in the -cluster. By default, data collection is disabled. For more information, see -<>. +.. Verify that the `xpack.monitoring.enabled`, +`xpack.monitoring.collection.enabled`, and +`xpack.monitoring.elasticsearch.collection.enabled` settings are `true` on each +node in the cluster. By default xpack.monitoring.collection.enabled is disabled +(`false`), and that overrides xpack.monitoring.elasticsearch.collection.enabled, +which defaults to being enabled (`true`). Both settings can be set dynamically +at runtime. For more information, see <>. .. Optional: Specify which indices you want to monitor. + diff --git a/docs/reference/monitoring/pause-export.asciidoc b/docs/reference/monitoring/pause-export.asciidoc index 128e72a463c2d..7a8bc664ffc38 100644 --- a/docs/reference/monitoring/pause-export.asciidoc +++ b/docs/reference/monitoring/pause-export.asciidoc @@ -16,6 +16,16 @@ monitoring data from other sources such as {kib}, Beats, and Logstash is ignored You can update this setting by using the {ref}/cluster-update-settings.html[Cluster Update Settings API]. +If you want to collect data from sources such as {kib}, Beats, and Logstash but +not collect data about your {es} cluster, you can disable data collection +just for {es}: + +[source,yaml] +--------------------------------------------------- +xpack.monitoring.collection.enabled: true +xpack.monitoring.elasticsearch.collection.enabled: false +--------------------------------------------------- + If you want to separately disable a specific exporter, you can specify the `enabled` setting (which defaults to `true`) per exporter. 
For example: diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index 2759944e61572..a039084412cda 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -66,6 +66,17 @@ option in `kibana.yml` to the same value. You can update this setting through the <>. +`xpack.monitoring.elasticsearch.collection.enabled`:: + +Controls whether statistics about your {es} cluster should be collected. Defaults to `true`. +This is different from `xpack.monitoring.collection.enabled`, which allows you to enable or disable +all monitoring collection. However, this setting simply disables the collection of Elasticsearch +data while still allowing other data (e.g., Kibana, Logstash, Beats, or APM Server monitoring data) +to pass through this cluster. ++ +You can update this setting through the +<>. + `xpack.monitoring.collection.cluster.stats.timeout`:: Sets the timeout for collecting the cluster statistics. Defaults to `10s`. diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index bb2ed76831da2..027cb7de937f4 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -174,6 +174,7 @@ public List<Setting<?>> getSettings() { settings.add(MonitoringField.HISTORY_DURATION); settings.add(CLEAN_WATCHER_HISTORY); settings.add(MonitoringService.ENABLED); + settings.add(MonitoringService.ELASTICSEARCH_COLLECTION_ENABLED); settings.add(MonitoringService.INTERVAL); settings.add(Collector.INDICES); settings.add(ClusterStatsCollector.CLUSTER_STATS_TIMEOUT); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java index 07d24826f8648..073a4cf785c41 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java @@ -43,8 +43,21 @@ public class MonitoringService extends AbstractLifecycleComponent { */ public static final TimeValue MIN_INTERVAL = TimeValue.timeValueSeconds(1L); + /* + * Dynamically controls enabling or disabling the collection of Monitoring data only from Elasticsearch. + *
<p>
+ * This should only be used while transitioning to Metricbeat-based data collection for Elasticsearch with + * {@linkplain #ENABLED} set to {@code true}. By setting this to {@code false} and that value to {@code true}, + * Kibana, Logstash, Beats, and APM Server can all continue to report their stats through this cluster until they + * are transitioned to being monitored by Metricbeat as well. + */ + public static final Setting ELASTICSEARCH_COLLECTION_ENABLED = + Setting.boolSetting("xpack.monitoring.elasticsearch.collection.enabled", true, + Setting.Property.Dynamic, Setting.Property.NodeScope); + /** - * Dynamically controls enabling or disabling the collection of Monitoring data. + * Dynamically controls enabling or disabling the collection of Monitoring data from Elasticsearch as well as other products + * in the stack. */ public static final Setting ENABLED = Setting.boolSetting("xpack.monitoring.collection.enabled", false, @@ -68,6 +81,7 @@ public class MonitoringService extends AbstractLifecycleComponent { private final Set collectors; private final Exporters exporters; + private volatile boolean elasticsearchCollectionEnabled; private volatile boolean enabled; private volatile TimeValue interval; private volatile ThreadPool.Cancellable scheduler; @@ -79,13 +93,21 @@ public class MonitoringService extends AbstractLifecycleComponent { this.threadPool = Objects.requireNonNull(threadPool); this.collectors = Objects.requireNonNull(collectors); this.exporters = Objects.requireNonNull(exporters); + this.elasticsearchCollectionEnabled = ELASTICSEARCH_COLLECTION_ENABLED.get(settings); this.enabled = ENABLED.get(settings); this.interval = INTERVAL.get(settings); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(ELASTICSEARCH_COLLECTION_ENABLED, this::setElasticsearchCollectionEnabled); clusterService.getClusterSettings().addSettingsUpdateConsumer(ENABLED, this::setMonitoringActive); clusterService.getClusterSettings().addSettingsUpdateConsumer(INTERVAL, this::setInterval); } + void setElasticsearchCollectionEnabled(final boolean enabled) { + this.elasticsearchCollectionEnabled = enabled; + scheduleExecution(); + } + void setMonitoringActive(final boolean enabled) { this.enabled = enabled; scheduleExecution(); @@ -104,6 +126,14 @@ public boolean isMonitoringActive() { return isStarted() && enabled; } + boolean isElasticsearchCollectionEnabled() { + return this.elasticsearchCollectionEnabled; + } + + boolean shouldScheduleExecution() { + return isElasticsearchCollectionEnabled() && isMonitoringActive(); + } + private String threadPoolName() { return ThreadPool.Names.GENERIC; } @@ -155,7 +185,7 @@ void scheduleExecution() { if (scheduler != null) { cancelExecution(); } - if (isMonitoringActive()) { + if (shouldScheduleExecution()) { scheduler = threadPool.scheduleWithFixedDelay(monitor, interval, threadPoolName()); } } @@ -188,7 +218,7 @@ class MonitoringExecution extends AbstractRunnable implements Closeable { @Override public void doRun() { - if (isMonitoringActive() == false) { + if (shouldScheduleExecution() == false) { logger.debug("monitoring execution is skipped"); return; } @@ -223,7 +253,7 @@ protected void doRun() throws Exception { new ParameterizedMessage("monitoring collector [{}] failed to collect data", collector.name()), e); } } - if (isMonitoringActive()) { + if (shouldScheduleExecution()) { exporters.export(results, ActionListener.wrap(r -> semaphore.release(), this::onFailure)); } else { semaphore.release(); diff --git 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java index 47504736d26e9..f4484aa5ed755 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java @@ -182,4 +182,4 @@ public void testDoCollect() throws Exception { assertThat(recoveries.shardRecoveryStates().size(), equalTo(nbRecoveries)); } } -} \ No newline at end of file +} From 615f494c7783bf2e689392213954a341812b0802 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 18 Sep 2018 07:25:22 +0200 Subject: [PATCH 07/46] MINOR: Drop Redundant Ctx. Check in ScriptService (#33782) * MINOR: Drop Redundant Ctx. Check in ScriptService * This check is completely redundant, the expression script engine will throw anyway (and with a similar message) for those contexts that it cannot compile. Moreover, the update context is not the only context that is not supported by the expression engine at this point, so handling the update context separately here makes no sense. --- .../script/expression/MoreExpressionTests.java | 2 +- .../script/expression/StoredExpressionTests.java | 2 +- .../java/org/elasticsearch/script/ScriptService.java | 9 --------- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index 932e5979c0f9a..6d7ab1d259524 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -582,7 +582,7 @@ public void testInvalidUpdateScript() throws Exception { String message = e.getMessage(); assertThat(message + " should have contained failed to execute", message.contains("failed to execute"), equalTo(true)); message = e.getCause().getMessage(); - assertThat(message + " should have contained not supported", message.contains("not supported"), equalTo(true)); + assertThat(message, equalTo("Failed to compile inline script [0] using lang [expression]")); } } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java index c922392a05ab4..1877326b7d063 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/StoredExpressionTests.java @@ -61,7 +61,7 @@ public void testAllOpsDisabledIndexedScripts() throws IOException { fail("update script should have been rejected"); } catch(Exception e) { assertThat(e.getMessage(), containsString("failed to execute script")); - assertThat(e.getCause().getMessage(), containsString("scripts of type [stored], operation [update] and lang [expression] are not supported")); + assertThat(e.getCause().getMessage(), containsString("Failed to compile stored script [script1] using lang [expression]")); } try { client().prepareSearch() diff --git
a/server/src/main/java/org/elasticsearch/script/ScriptService.java b/server/src/main/java/org/elasticsearch/script/ScriptService.java index d37cefb3a0103..6a54af8721e3b 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptService.java @@ -281,15 +281,6 @@ public FactoryType compile(Script script, ScriptContext Date: Tue, 18 Sep 2018 09:43:50 +0200 Subject: [PATCH 08/46] [CCR] Changed AutoFollowCoordinator to keep track of certain statistics (#33684) The following stats are being kept track of: 1) The total number of times that auto following a leader index succeed. 2) The total number of times that auto following a leader index failed. 3) The total number of times that fetching a remote cluster state failed. 4) The most recent 256 auto follow failures per auto leader index (e.g. create_and_follow api call fails) or cluster alias (e.g. fetching remote cluster state fails). Each auto follow run now produces a result that is being used to update the stats being kept track of in AutoFollowCoordinator. Relates to #33007 --- .../xpack/ccr/CcrMultiClusterLicenseIT.java | 2 +- .../xpack/ccr/CcrLicenseChecker.java | 2 +- .../ccr/action/AutoFollowCoordinator.java | 222 ++++++++++++------ .../action/AutoFollowCoordinatorTests.java | 101 +++++++- .../ccr/action/AutoFollowStatsTests.java | 77 ++++++ .../xpack/core/ccr/AutoFollowStats.java | 194 +++++++++++++++ 6 files changed, 516 insertions(+), 82 deletions(-) create mode 100644 x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsTests.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java index 7bc952a3ea8e8..505683b892ca8 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java @@ -64,7 +64,7 @@ public void testAutoFollow() throws Exception { while (it.hasNext()) { final String line = it.next(); if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + - "failure occurred during auto-follower coordination")) { + "failure occurred while fetching cluster state in leader cluster \\[leader_cluster\\]")) { warn = true; break; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index c000072588704..f597871fc66e6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -64,7 +64,7 @@ public final class CcrLicenseChecker { * * @param isCcrAllowed a boolean supplier that should return true if CCR is allowed and false otherwise */ - CcrLicenseChecker(final BooleanSupplier isCcrAllowed) { + public CcrLicenseChecker(final BooleanSupplier isCcrAllowed) { this.isCcrAllowed = Objects.requireNonNull(isCcrAllowed); } diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 180e5e3799098..3a524e5724980 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -7,6 +7,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.client.Client; @@ -17,8 +19,10 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.license.LicenseUtils; @@ -27,15 +31,18 @@ import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.AutoFollowStats; import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; +import java.util.TreeMap; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -47,6 +54,7 @@ public class AutoFollowCoordinator implements ClusterStateApplier { private static final Logger LOGGER = LogManager.getLogger(AutoFollowCoordinator.class); + private static final int MAX_AUTO_FOLLOW_ERRORS = 256; private final Client client; private final TimeValue pollInterval; @@ -56,6 +64,12 @@ public class AutoFollowCoordinator implements ClusterStateApplier { private volatile boolean localNodeMaster = false; + // The following fields are read and updated under a lock: + private long numberOfSuccessfulIndicesAutoFollowed = 0; + private long numberOfFailedIndicesAutoFollowed = 0; + private long numberOfFailedRemoteClusterStateRequests = 0; + private final LinkedHashMap recentAutoFollowErrors; + public AutoFollowCoordinator( Settings settings, Client client, @@ -69,6 +83,47 @@ public AutoFollowCoordinator( this.pollInterval = CcrSettings.CCR_AUTO_FOLLOW_POLL_INTERVAL.get(settings); clusterService.addStateApplier(this); + + this.recentAutoFollowErrors = new LinkedHashMap() { + @Override + protected boolean removeEldestEntry(final Map.Entry eldest) { + return size() > MAX_AUTO_FOLLOW_ERRORS; + } + }; + } + + public synchronized AutoFollowStats getStats() { + return new AutoFollowStats( + numberOfFailedIndicesAutoFollowed, + numberOfFailedRemoteClusterStateRequests, + numberOfSuccessfulIndicesAutoFollowed, + new TreeMap<>(recentAutoFollowErrors) + ); + } + + synchronized void 
updateStats(List results) { + for (AutoFollowResult result : results) { + if (result.clusterStateFetchException != null) { + recentAutoFollowErrors.put(result.clusterAlias, + new ElasticsearchException(result.clusterStateFetchException)); + numberOfFailedRemoteClusterStateRequests++; + LOGGER.warn(new ParameterizedMessage("failure occurred while fetching cluster state in leader cluster [{}]", + result.clusterAlias), result.clusterStateFetchException); + } else { + for (Map.Entry entry : result.autoFollowExecutionResults.entrySet()) { + if (entry.getValue() != null) { + numberOfFailedIndicesAutoFollowed++; + recentAutoFollowErrors.put(result.clusterAlias + ":" + entry.getKey().getName(), + new ElasticsearchException(entry.getValue())); + LOGGER.warn(new ParameterizedMessage("failure occurred while auto following index [{}] in leader cluster [{}]", + entry.getKey(), result.clusterAlias), entry.getValue()); + } else { + numberOfSuccessfulIndicesAutoFollowed++; + } + } + } + + } } private void doAutoFollow() { @@ -94,10 +149,8 @@ private void doAutoFollow() { return; } - Consumer handler = e -> { - if (e != null) { - LOGGER.warn("failure occurred during auto-follower coordination", e); - } + Consumer> handler = results -> { + updateStats(results); threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); }; AutoFollower operation = new AutoFollower(handler, followerClusterState) { @@ -178,101 +231,97 @@ public void applyClusterState(ClusterChangedEvent event) { abstract static class AutoFollower { - private final Consumer handler; + private final Consumer> handler; private final ClusterState followerClusterState; private final AutoFollowMetadata autoFollowMetadata; private final CountDown autoFollowPatternsCountDown; - private final AtomicReference autoFollowPatternsErrorHolder = new AtomicReference<>(); + private final AtomicArray autoFollowResults; - AutoFollower(final Consumer handler, final ClusterState followerClusterState) { + AutoFollower(final Consumer> handler, final ClusterState followerClusterState) { this.handler = handler; this.followerClusterState = followerClusterState; this.autoFollowMetadata = followerClusterState.getMetaData().custom(AutoFollowMetadata.TYPE); this.autoFollowPatternsCountDown = new CountDown(autoFollowMetadata.getPatterns().size()); + this.autoFollowResults = new AtomicArray<>(autoFollowMetadata.getPatterns().size()); } void autoFollowIndices() { + int i = 0; for (Map.Entry entry : autoFollowMetadata.getPatterns().entrySet()) { - String clusterAlias = entry.getKey(); - AutoFollowPattern autoFollowPattern = entry.getValue(); - List followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(clusterAlias); + final int slot = i; + final String clusterAlias = entry.getKey(); + final AutoFollowPattern autoFollowPattern = entry.getValue(); getLeaderClusterState(autoFollowPattern.getHeaders(), clusterAlias, (leaderClusterState, e) -> { if (leaderClusterState != null) { assert e == null; - handleClusterAlias(clusterAlias, autoFollowPattern, followedIndices, leaderClusterState); + final List followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(clusterAlias); + final List leaderIndicesToFollow = + getLeaderIndicesToFollow(autoFollowPattern, leaderClusterState, followerClusterState, followedIndices); + if (leaderIndicesToFollow.isEmpty()) { + finalise(slot, new AutoFollowResult(clusterAlias)); + } else { + Consumer resultHandler = result -> finalise(slot, result); + checkAutoFollowPattern(clusterAlias, autoFollowPattern, 
leaderIndicesToFollow, resultHandler); + } } else { - finalise(e); + finalise(slot, new AutoFollowResult(clusterAlias, e)); } }); + i++; } } - private void handleClusterAlias(String clusterAlias, AutoFollowPattern autoFollowPattern, - List followedIndexUUIDs, ClusterState leaderClusterState) { - final List leaderIndicesToFollow = - getLeaderIndicesToFollow(autoFollowPattern, leaderClusterState, followerClusterState, followedIndexUUIDs); - if (leaderIndicesToFollow.isEmpty()) { - finalise(null); - } else { - final CountDown leaderIndicesCountDown = new CountDown(leaderIndicesToFollow.size()); - final AtomicReference leaderIndicesErrorHolder = new AtomicReference<>(); - for (Index indexToFollow : leaderIndicesToFollow) { - final String leaderIndexName = indexToFollow.getName(); - final String followIndexName = getFollowerIndexName(autoFollowPattern, leaderIndexName); - - String leaderIndexNameWithClusterAliasPrefix = clusterAlias.equals("_local_") ? leaderIndexName : - clusterAlias + ":" + leaderIndexName; - FollowIndexAction.Request followRequest = - new FollowIndexAction.Request(leaderIndexNameWithClusterAliasPrefix, followIndexName, - autoFollowPattern.getMaxBatchOperationCount(), autoFollowPattern.getMaxConcurrentReadBatches(), - autoFollowPattern.getMaxOperationSizeInBytes(), autoFollowPattern.getMaxConcurrentWriteBatches(), - autoFollowPattern.getMaxWriteBufferSize(), autoFollowPattern.getMaxRetryDelay(), - autoFollowPattern.getIdleShardRetryDelay()); - - // Execute if the create and follow api call succeeds: - Runnable successHandler = () -> { - LOGGER.info("Auto followed leader index [{}] as follow index [{}]", leaderIndexName, followIndexName); - - // This function updates the auto follow metadata in the cluster to record that the leader index has been followed: - // (so that we do not try to follow it in subsequent auto follow runs) - Function function = recordLeaderIndexAsFollowFunction(clusterAlias, indexToFollow); - // The coordinator always runs on the elected master node, so we can update cluster state here: - updateAutoFollowMetadata(function, updateError -> { - if (updateError != null) { - LOGGER.error("Failed to mark leader index [" + leaderIndexName + "] as auto followed", updateError); - if (leaderIndicesErrorHolder.compareAndSet(null, updateError) == false) { - leaderIndicesErrorHolder.get().addSuppressed(updateError); - } - } else { - LOGGER.debug("Successfully marked leader index [{}] as auto followed", leaderIndexName); - } - if (leaderIndicesCountDown.countDown()) { - finalise(leaderIndicesErrorHolder.get()); - } - }); - }; - // Execute if the create and follow apu call fails: - Consumer failureHandler = followError -> { - assert followError != null; - LOGGER.warn("Failed to auto follow leader index [" + leaderIndexName + "]", followError); - if (leaderIndicesCountDown.countDown()) { - finalise(followError); - } - }; - createAndFollow(autoFollowPattern.getHeaders(), followRequest, successHandler, failureHandler); - } + private void checkAutoFollowPattern(String clusterAlias, AutoFollowPattern autoFollowPattern, + List leaderIndicesToFollow, Consumer resultHandler) { + + final CountDown leaderIndicesCountDown = new CountDown(leaderIndicesToFollow.size()); + final AtomicArray> results = new AtomicArray<>(leaderIndicesToFollow.size()); + for (int i = 0; i < leaderIndicesToFollow.size(); i++) { + final Index indexToFollow = leaderIndicesToFollow.get(i); + final int slot = i; + followLeaderIndex(clusterAlias, indexToFollow, autoFollowPattern, error -> { + 
results.set(slot, new Tuple<>(indexToFollow, error)); + if (leaderIndicesCountDown.countDown()) { + resultHandler.accept(new AutoFollowResult(clusterAlias, results.asList())); + } + }); } } - private void finalise(Exception failure) { - if (autoFollowPatternsErrorHolder.compareAndSet(null, failure) == false) { - autoFollowPatternsErrorHolder.get().addSuppressed(failure); - } + private void followLeaderIndex(String clusterAlias, Index indexToFollow, + AutoFollowPattern pattern, Consumer onResult) { + final String leaderIndexName = indexToFollow.getName(); + final String followIndexName = getFollowerIndexName(pattern, leaderIndexName); + + String leaderIndexNameWithClusterAliasPrefix = clusterAlias.equals("_local_") ? leaderIndexName : + clusterAlias + ":" + leaderIndexName; + FollowIndexAction.Request request = + new FollowIndexAction.Request(leaderIndexNameWithClusterAliasPrefix, followIndexName, + pattern.getMaxBatchOperationCount(), pattern.getMaxConcurrentReadBatches(), + pattern.getMaxOperationSizeInBytes(), pattern.getMaxConcurrentWriteBatches(), + pattern.getMaxWriteBufferSize(), pattern.getMaxRetryDelay(), + pattern.getIdleShardRetryDelay()); + + // Execute if the create and follow api call succeeds: + Runnable successHandler = () -> { + LOGGER.info("Auto followed leader index [{}] as follow index [{}]", leaderIndexName, followIndexName); + + // This function updates the auto follow metadata in the cluster to record that the leader index has been followed: + // (so that we do not try to follow it in subsequent auto follow runs) + Function function = recordLeaderIndexAsFollowFunction(clusterAlias, indexToFollow); + // The coordinator always runs on the elected master node, so we can update cluster state here: + updateAutoFollowMetadata(function, onResult); + }; + createAndFollow(pattern.getHeaders(), request, successHandler, onResult); + } + private void finalise(int slot, AutoFollowResult result) { + assert autoFollowResults.get(slot) == null; + autoFollowResults.set(slot, result); if (autoFollowPatternsCountDown.countDown()) { - handler.accept(autoFollowPatternsErrorHolder.get()); + handler.accept(autoFollowResults.asList()); } } @@ -347,4 +396,33 @@ abstract void updateAutoFollowMetadata( ); } + + static class AutoFollowResult { + + final String clusterAlias; + final Exception clusterStateFetchException; + final Map autoFollowExecutionResults; + + AutoFollowResult(String clusterAlias, List> results) { + this.clusterAlias = clusterAlias; + + Map autoFollowExecutionResults = new HashMap<>(); + for (Tuple result : results) { + autoFollowExecutionResults.put(result.v1(), result.v2()); + } + + this.clusterStateFetchException = null; + this.autoFollowExecutionResults = Collections.unmodifiableMap(autoFollowExecutionResults); + } + + AutoFollowResult(String clusterAlias, Exception e) { + this.clusterAlias = clusterAlias; + this.clusterStateFetchException = e; + this.autoFollowExecutionResults = Collections.emptyMap(); + } + + AutoFollowResult(String clusterAlias) { + this(clusterAlias, (Exception) null); + } + } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 31af326250c3b..218825e41207b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -11,15 +11,20 
@@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.AutoFollowStats; import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -63,9 +68,15 @@ public void testAutoFollower() { .build(); boolean[] invoked = new boolean[]{false}; - Consumer handler = e -> { + Consumer> handler = results -> { invoked[0] = true; - assertThat(e, nullValue()); + + assertThat(results.size(), equalTo(1)); + assertThat(results.get(0).clusterStateFetchException, nullValue()); + List> entries = new ArrayList<>(results.get(0).autoFollowExecutionResults.entrySet()); + assertThat(entries.size(), equalTo(1)); + assertThat(entries.get(0).getKey().getName(), equalTo("logs-20190101")); + assertThat(entries.get(0).getValue(), nullValue()); }; AutoFollower autoFollower = new AutoFollower(handler, currentState) { @Override @@ -116,9 +127,12 @@ public void testAutoFollowerClusterStateApiFailure() { Exception failure = new RuntimeException("failure"); boolean[] invoked = new boolean[]{false}; - Consumer handler = e -> { + Consumer> handler = results -> { invoked[0] = true; - assertThat(e, sameInstance(failure)); + + assertThat(results.size(), equalTo(1)); + assertThat(results.get(0).clusterStateFetchException, sameInstance(failure)); + assertThat(results.get(0).autoFollowExecutionResults.entrySet().size(), equalTo(0)); }; AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override @@ -170,9 +184,15 @@ public void testAutoFollowerUpdateClusterStateFailure() { Exception failure = new RuntimeException("failure"); boolean[] invoked = new boolean[]{false}; - Consumer handler = e -> { + Consumer> handler = results -> { invoked[0] = true; - assertThat(e, sameInstance(failure)); + + assertThat(results.size(), equalTo(1)); + assertThat(results.get(0).clusterStateFetchException, nullValue()); + List> entries = new ArrayList<>(results.get(0).autoFollowExecutionResults.entrySet()); + assertThat(entries.size(), equalTo(1)); + assertThat(entries.get(0).getKey().getName(), equalTo("logs-20190101")); + assertThat(entries.get(0).getValue(), sameInstance(failure)); }; AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override @@ -225,9 +245,15 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { Exception failure = new RuntimeException("failure"); boolean[] invoked = new boolean[]{false}; - Consumer handler = e -> { + Consumer> handler = results -> { invoked[0] = true; - assertThat(e, sameInstance(failure)); + + assertThat(results.size(), equalTo(1)); + assertThat(results.get(0).clusterStateFetchException, nullValue()); + List> entries = new ArrayList<>(results.get(0).autoFollowExecutionResults.entrySet()); + assertThat(entries.size(), equalTo(1)); + assertThat(entries.get(0).getKey().getName(), 
equalTo("logs-20190101")); + assertThat(entries.get(0).getValue(), sameInstance(failure)); }; AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override @@ -317,4 +343,63 @@ public void testGetFollowerIndexName() { assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); } + public void testStats() { + AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( + Settings.EMPTY, + null, + null, + mock(ClusterService.class), + new CcrLicenseChecker(() -> true) + ); + + autoFollowCoordinator.updateStats(Collections.singletonList( + new AutoFollowCoordinator.AutoFollowResult("_alias1")) + ); + AutoFollowStats autoFollowStats = autoFollowCoordinator.getStats(); + assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(0L)); + assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L)); + assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(0L)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(0)); + + autoFollowCoordinator.updateStats(Collections.singletonList( + new AutoFollowCoordinator.AutoFollowResult("_alias1", new RuntimeException("error"))) + ); + autoFollowStats = autoFollowCoordinator.getStats(); + assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(0L)); + assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(1L)); + assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(0L)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(1)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias1").getCause().getMessage(), equalTo("error")); + + autoFollowCoordinator.updateStats(Arrays.asList( + new AutoFollowCoordinator.AutoFollowResult("_alias1", + Collections.singletonList(Tuple.tuple(new Index("index1", "_na_"), new RuntimeException("error")))), + new AutoFollowCoordinator.AutoFollowResult("_alias2", + Collections.singletonList(Tuple.tuple(new Index("index2", "_na_"), new RuntimeException("error")))) + )); + autoFollowStats = autoFollowCoordinator.getStats(); + assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(2L)); + assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(1L)); + assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(0L)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(3)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias1").getCause().getMessage(), equalTo("error")); + assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias1:index1").getCause().getMessage(), equalTo("error")); + assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias2:index2").getCause().getMessage(), equalTo("error")); + + autoFollowCoordinator.updateStats(Arrays.asList( + new AutoFollowCoordinator.AutoFollowResult("_alias1", + Collections.singletonList(Tuple.tuple(new Index("index1", "_na_"), null))), + new AutoFollowCoordinator.AutoFollowResult("_alias2", + Collections.singletonList(Tuple.tuple(new Index("index2", "_na_"), null))) + )); + autoFollowStats = autoFollowCoordinator.getStats(); + assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(2L)); + assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(1L)); + assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(2L)); + assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(3)); + 
assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias1").getCause().getMessage(), equalTo("error")); + assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias1:index1").getCause().getMessage(), equalTo("error")); + assertThat(autoFollowStats.getRecentAutoFollowErrors().get("_alias2:index2").getCause().getMessage(), equalTo("error")); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsTests.java new file mode 100644 index 0000000000000..b9ee5bf464616 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ccr.AutoFollowStats; + +import java.io.IOException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class AutoFollowStatsTests extends AbstractSerializingTestCase { + + @Override + protected AutoFollowStats doParseInstance(XContentParser parser) throws IOException { + return AutoFollowStats.fromXContent(parser); + } + + @Override + protected AutoFollowStats createTestInstance() { + return new AutoFollowStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomReadExceptions() + ); + } + + private static NavigableMap randomReadExceptions() { + final int count = randomIntBetween(0, 16); + final NavigableMap readExceptions = new TreeMap<>(); + for (int i = 0; i < count; i++) { + readExceptions.put("" + i, new ElasticsearchException(new IllegalStateException("index [" + i + "]"))); + } + return readExceptions; + } + + @Override + protected Writeable.Reader instanceReader() { + return AutoFollowStats::new; + } + + @Override + protected void assertEqualInstances(AutoFollowStats expectedInstance, AutoFollowStats newInstance) { + assertNotSame(expectedInstance, newInstance); + + assertThat(newInstance.getRecentAutoFollowErrors().size(), equalTo(expectedInstance.getRecentAutoFollowErrors().size())); + assertThat(newInstance.getRecentAutoFollowErrors().keySet(), equalTo(expectedInstance.getRecentAutoFollowErrors().keySet())); + for (final Map.Entry entry : newInstance.getRecentAutoFollowErrors().entrySet()) { + // x-content loses the exception + final ElasticsearchException expected = expectedInstance.getRecentAutoFollowErrors().get(entry.getKey()); + assertThat(entry.getValue().getMessage(), containsString(expected.getMessage())); + assertNotNull(entry.getValue().getCause()); + assertThat( + entry.getValue().getCause(), + anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); + assertThat(entry.getValue().getCause().getMessage(), containsString(expected.getCause().getMessage())); + } + } + + @Override + 
protected boolean assertToXContentEquivalence() { + return false; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java new file mode 100644 index 0000000000000..7133a201f4e2a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ccr; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.TreeMap; +import java.util.stream.Collectors; + +public class AutoFollowStats implements Writeable, ToXContentObject { + + private static final ParseField NUMBER_OF_SUCCESSFUL_INDICES_AUTO_FOLLOWED = new ParseField("number_of_successful_follow_indices"); + private static final ParseField NUMBER_OF_FAILED_INDICES_AUTO_FOLLOWED = new ParseField("number_of_failed_follow_indices"); + private static final ParseField NUMBER_OF_FAILED_REMOTE_CLUSTER_STATE_REQUESTS = + new ParseField("number_of_failed_remote_cluster_state_requests"); + private static final ParseField RECENT_AUTO_FOLLOW_ERRORS = new ParseField("recent_auto_follow_errors"); + private static final ParseField LEADER_INDEX = new ParseField("leader_index"); + private static final ParseField AUTO_FOLLOW_EXCEPTION = new ParseField("auto_follow_exception"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser STATS_PARSER = new ConstructingObjectParser<>("auto_follow_stats", + args -> new AutoFollowStats( + (Long) args[0], + (Long) args[1], + (Long) args[2], + new TreeMap<>( + ((List>) args[3]) + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))) + )); + + private static final ConstructingObjectParser, Void> AUTO_FOLLOW_EXCEPTIONS_PARSER = + new ConstructingObjectParser<>( + "auto_follow_stats_errors", + args -> new AbstractMap.SimpleEntry<>((String) args[0], (ElasticsearchException) args[1])); + + static { + AUTO_FOLLOW_EXCEPTIONS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX); + AUTO_FOLLOW_EXCEPTIONS_PARSER.declareObject( + ConstructingObjectParser.constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + AUTO_FOLLOW_EXCEPTION); + + STATS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_INDICES_AUTO_FOLLOWED); + STATS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_REMOTE_CLUSTER_STATE_REQUESTS); + STATS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_INDICES_AUTO_FOLLOWED); + 
STATS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), AUTO_FOLLOW_EXCEPTIONS_PARSER, + RECENT_AUTO_FOLLOW_ERRORS); + } + + public static AutoFollowStats fromXContent(final XContentParser parser) { + return STATS_PARSER.apply(parser, null); + } + + private final long numberOfFailedFollowIndices; + private final long numberOfFailedRemoteClusterStateRequests; + private final long numberOfSuccessfulFollowIndices; + private final NavigableMap recentAutoFollowErrors; + + public AutoFollowStats( + long numberOfFailedFollowIndices, + long numberOfFailedRemoteClusterStateRequests, + long numberOfSuccessfulFollowIndices, + NavigableMap recentAutoFollowErrors + ) { + this.numberOfFailedFollowIndices = numberOfFailedFollowIndices; + this.numberOfFailedRemoteClusterStateRequests = numberOfFailedRemoteClusterStateRequests; + this.numberOfSuccessfulFollowIndices = numberOfSuccessfulFollowIndices; + this.recentAutoFollowErrors = recentAutoFollowErrors; + } + + public AutoFollowStats(StreamInput in) throws IOException { + numberOfFailedFollowIndices = in.readVLong(); + numberOfFailedRemoteClusterStateRequests = in.readVLong(); + numberOfSuccessfulFollowIndices = in.readVLong(); + recentAutoFollowErrors= new TreeMap<>(in.readMap(StreamInput::readString, StreamInput::readException)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(numberOfFailedFollowIndices); + out.writeVLong(numberOfFailedRemoteClusterStateRequests); + out.writeVLong(numberOfSuccessfulFollowIndices); + out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, StreamOutput::writeException); + } + + public long getNumberOfFailedFollowIndices() { + return numberOfFailedFollowIndices; + } + + public long getNumberOfFailedRemoteClusterStateRequests() { + return numberOfFailedRemoteClusterStateRequests; + } + + public long getNumberOfSuccessfulFollowIndices() { + return numberOfSuccessfulFollowIndices; + } + + public NavigableMap getRecentAutoFollowErrors() { + return recentAutoFollowErrors; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(NUMBER_OF_FAILED_INDICES_AUTO_FOLLOWED.getPreferredName(), numberOfFailedFollowIndices); + builder.field(NUMBER_OF_FAILED_REMOTE_CLUSTER_STATE_REQUESTS.getPreferredName(), numberOfFailedRemoteClusterStateRequests); + builder.field(NUMBER_OF_SUCCESSFUL_INDICES_AUTO_FOLLOWED.getPreferredName(), numberOfSuccessfulFollowIndices); + builder.startArray(RECENT_AUTO_FOLLOW_ERRORS.getPreferredName()); + { + for (final Map.Entry entry : recentAutoFollowErrors.entrySet()) { + builder.startObject(); + { + builder.field(LEADER_INDEX.getPreferredName(), entry.getKey()); + builder.field(AUTO_FOLLOW_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AutoFollowStats that = (AutoFollowStats) o; + return numberOfFailedFollowIndices == that.numberOfFailedFollowIndices && + numberOfFailedRemoteClusterStateRequests == that.numberOfFailedRemoteClusterStateRequests && + numberOfSuccessfulFollowIndices == that.numberOfSuccessfulFollowIndices && + /* + * ElasticsearchException does not 
implement equals so we will assume the fetch exceptions are equal if they are equal + * up to the key set and their messages. Note that we are relying on the fact that the auto follow exceptions are ordered by + * keys. + */ + recentAutoFollowErrors.keySet().equals(that.recentAutoFollowErrors.keySet()) && + getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)); + } + + @Override + public int hashCode() { + return Objects.hash( + numberOfFailedFollowIndices, + numberOfFailedRemoteClusterStateRequests, + numberOfSuccessfulFollowIndices, + /* + * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the + * messages. Note that we are relying on the fact that the auto follow exceptions are ordered by keys. + */ + recentAutoFollowErrors.keySet(), + getFetchExceptionMessages(this) + ); + } + + private static List getFetchExceptionMessages(final AutoFollowStats status) { + return status.getRecentAutoFollowErrors().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); + } + + @Override + public String toString() { + return "AutoFollowStats{" + + "numberOfFailedFollowIndices=" + numberOfFailedFollowIndices + + ", numberOfFailedRemoteClusterStateRequests=" + numberOfFailedRemoteClusterStateRequests + + ", numberOfSuccessfulFollowIndices=" + numberOfSuccessfulFollowIndices + + ", recentAutoFollowErrors=" + recentAutoFollowErrors + + '}'; + } +} From e075b872f620c4ac2abda18d281db332c7e69847 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Tue, 18 Sep 2018 10:14:12 +0200 Subject: [PATCH 09/46] Dependencies: Update javax.mail in watcher to 1.6.2 (#33664) --- x-pack/plugin/watcher/build.gradle | 2 +- x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 | 1 - x-pack/plugin/watcher/licenses/javax.mail-1.6.2.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 create mode 100644 x-pack/plugin/watcher/licenses/javax.mail-1.6.2.jar.sha1 diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index 3412cafc4f4ce..6f3497df8fcb2 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -35,7 +35,7 @@ dependencies { // watcher deps compile 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:r239' compile 'com.google.guava:guava:16.0.1' // needed by watcher for the html sanitizer and security tests for jimfs - compile 'com.sun.mail:javax.mail:1.5.6' + compile 'com.sun.mail:javax.mail:1.6.2' // HACK: java 9 removed javax.activation from the default modules, so instead of trying to add modules, which would have // to be conditionalized for java 8/9, we pull in the classes directly compile 'javax.activation:activation:1.1.1' diff --git a/x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 b/x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 deleted file mode 100644 index c9d823f6a5300..0000000000000 --- a/x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab5daef2f881c42c8e280cbe918ec4d7fdfd7efe \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/javax.mail-1.6.2.jar.sha1 b/x-pack/plugin/watcher/licenses/javax.mail-1.6.2.jar.sha1 new file mode 100644 index 0000000000000..1c865d47f57c9 --- /dev/null +++ b/x-pack/plugin/watcher/licenses/javax.mail-1.6.2.jar.sha1 @@ -0,0 +1 @@ +935151eb71beff17a2ffac15dd80184a99a0514f \ No newline at end of file From 
139128856a5f63deb41243756e2b3b3a843fa57c Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Tue, 18 Sep 2018 10:25:16 +0200 Subject: [PATCH 10/46] Watcher: Use BulkProcessor in HistoryStore/TriggeredWatchStore (#32490)

Currently a watch execution results in one bulk request when the triggered watches that need to be executed are written into the triggered watches index. However, the update of the watch status, the creation of the watch history entry, and the deletion of the triggered watch entry are all single-document operations. This can have quite a negative impact once you are executing a lot of watches, as each execution results in four document writes, three of them being single-document actions.

This commit switches to a bulk processor instead of single-document actions for writing watch history entries and deleting triggered watch entries. However, the defaults still run synchronously, as before, because the number of concurrent requests is set to 0. This also fixes a bug where the deletion of the triggered watch entry was done asynchronously.

If you have a high number of watches being executed, however, you can configure watcher to delete the triggered watch entries as well as write the watch history entries via bulk requests. The triggered watch deletions should still happen in a timely manner, whereas the history entries might actually be bounded by size, as one entry can easily reach 20kb.

The following settings have been added:

- xpack.watcher.bulk.actions (default 1)
- xpack.watcher.bulk.concurrent_requests (default 0)
- xpack.watcher.bulk.flush_interval (default 1s)
- xpack.watcher.bulk.size (default 1mb)

The drawback is, of course, that on a node outage you might end up with watch history entries not being written, or with watches needing to be executed again because they have not been deleted from the triggered watches index. The window for these two cases widens when the bulk processor is configured to wait for larger thresholds.
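For illustration, here is a minimal sketch of the bulk processor wiring these settings imply (the setter calls mirror the Watcher.java diff below; the wrapper class and method names are made up for the example):

```java
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

class WatcherBulkProcessorSketch {
    // With the defaults shown here (actions=1, concurrent_requests=0), every
    // add() flushes immediately on the calling thread, so behaviour stays
    // synchronous, exactly as before this change.
    static BulkProcessor build(Client client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(client, listener)
            .setBulkActions(1)                                  // xpack.watcher.bulk.actions
            .setConcurrentRequests(0)                           // xpack.watcher.bulk.concurrent_requests
            .setFlushInterval(TimeValue.timeValueSeconds(1))    // xpack.watcher.bulk.flush_interval
            .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.MB)) // xpack.watcher.bulk.size
            .build();
    }
}
```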
--- .../elasticsearch/xpack/watcher/Watcher.java | 86 ++++++++- .../watcher/execution/ExecutionService.java | 17 +- .../execution/TriggeredWatchStore.java | 59 ++++--- .../xpack/watcher/history/HistoryStore.java | 73 ++------ .../hipchat/IntegrationAccount.java | 1 - .../execution/TriggeredWatchStoreTests.java | 166 ++++++++++++++---- .../watcher/history/HistoryStoreTests.java | 52 ++++-- 7 files changed, 291 insertions(+), 163 deletions(-) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 33b79c38ccaba..32d492b78a7a7 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -5,9 +5,14 @@ */ package org.elasticsearch.xpack.watcher; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -20,13 +25,14 @@ import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -51,6 +57,7 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -184,12 +191,16 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.function.UnaryOperator; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; +import static org.elasticsearch.common.settings.Setting.Property.NodeScope; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, ReloadablePlugin { @@ -201,6 +212,16 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa Setting.boolSetting("xpack.watcher.encrypt_sensitive_data", false, Setting.Property.NodeScope); public static final Setting MAX_STOP_TIMEOUT_SETTING = 
Setting.timeSetting("xpack.watcher.stop.timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); + private static final Setting SETTING_BULK_ACTIONS = + Setting.intSetting("xpack.watcher.bulk.actions", 1, 1, 10000, NodeScope); + private static final Setting SETTING_BULK_CONCURRENT_REQUESTS = + Setting.intSetting("xpack.watcher.bulk.concurrent_requests", 0, 0, 20, NodeScope); + private static final Setting SETTING_BULK_FLUSH_INTERVAL = + Setting.timeSetting("xpack.watcher.bulk.flush_interval", TimeValue.timeValueSeconds(1), NodeScope); + private static final Setting SETTING_BULK_SIZE = + Setting.byteSizeSetting("xpack.watcher.bulk.size", new ByteSizeValue(1, ByteSizeUnit.MB), + new ByteSizeValue(1, ByteSizeUnit.MB), new ByteSizeValue(10, ByteSizeUnit.MB), NodeScope); + public static final ScriptContext SCRIPT_SEARCH_CONTEXT = new ScriptContext<>("xpack", SearchScript.Factory.class); @@ -210,9 +231,10 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa public static final ScriptContext SCRIPT_TEMPLATE_CONTEXT = new ScriptContext<>("xpack_template", TemplateScript.Factory.class); - private static final Logger logger = Loggers.getLogger(Watcher.class); + private static final Logger logger = LogManager.getLogger(Watcher.class); private WatcherIndexingListener listener; private HttpClient httpClient; + private BulkProcessor bulkProcessor; protected final Settings settings; protected final boolean transportClient; @@ -318,7 +340,49 @@ public Collection createComponents(Client client, ClusterService cluster final InputRegistry inputRegistry = new InputRegistry(settings, inputFactories); inputFactories.put(ChainInput.TYPE, new ChainInputFactory(settings, inputRegistry)); - final HistoryStore historyStore = new HistoryStore(settings, client); + bulkProcessor = BulkProcessor.builder(ClientHelper.clientWithOrigin(client, WATCHER_ORIGIN), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + if (response.hasFailures()) { + Map triggeredWatches = Arrays.stream(response.getItems()) + .filter(BulkItemResponse::isFailed) + .filter(r -> r.getIndex().startsWith(TriggeredWatchStoreField.INDEX_NAME)) + .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); + if (triggeredWatches.isEmpty() == false) { + String failure = triggeredWatches.values().stream().collect(Collectors.joining(", ")); + logger.error("triggered watches could not be deleted {}, failure [{}]", + triggeredWatches.keySet(), Strings.substring(failure, 0, 2000)); + } + + Map overwrittenIds = Arrays.stream(response.getItems()) + .filter(BulkItemResponse::isFailed) + .filter(r -> r.getIndex().startsWith(HistoryStoreField.INDEX_PREFIX)) + .filter(r -> r.getVersion() > 1) + .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); + if (overwrittenIds.isEmpty() == false) { + String failure = overwrittenIds.values().stream().collect(Collectors.joining(", ")); + logger.info("overwrote watch history entries {}, possible second execution of a triggered watch, failure [{}]", + overwrittenIds.keySet(), Strings.substring(failure, 0, 2000)); + } + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + logger.error("error executing bulk", failure); + } + }) + .setFlushInterval(SETTING_BULK_FLUSH_INTERVAL.get(settings)) + 
.setBulkActions(SETTING_BULK_ACTIONS.get(settings)) + .setBulkSize(SETTING_BULK_SIZE.get(settings)) + .setConcurrentRequests(SETTING_BULK_CONCURRENT_REQUESTS.get(settings)) + .build(); + + HistoryStore historyStore = new HistoryStore(settings, bulkProcessor); // schedulers final Set scheduleParsers = new HashSet<>(); @@ -340,7 +404,7 @@ public Collection createComponents(Client client, ClusterService cluster final TriggerService triggerService = new TriggerService(settings, triggerEngines); final TriggeredWatch.Parser triggeredWatchParser = new TriggeredWatch.Parser(settings, triggerService); - final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, client, triggeredWatchParser); + final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, client, triggeredWatchParser, bulkProcessor); final WatcherSearchTemplateService watcherSearchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); @@ -416,6 +480,12 @@ public List> getSettings() { settings.add(Setting.simpleString("xpack.watcher.execution.scroll.timeout", Setting.Property.NodeScope)); settings.add(WatcherLifeCycleService.SETTING_REQUIRE_MANUAL_START); + // bulk processor configuration + settings.add(SETTING_BULK_ACTIONS); + settings.add(SETTING_BULK_CONCURRENT_REQUESTS); + settings.add(SETTING_BULK_FLUSH_INTERVAL); + settings.add(SETTING_BULK_SIZE); + // notification services settings.addAll(SlackService.getSettings()); settings.addAll(EmailService.getSettings()); @@ -608,7 +678,15 @@ public List> getContexts() { @Override public void close() throws IOException { + bulkProcessor.flush(); IOUtils.closeWhileHandlingException(httpClient); + try { + if (bulkProcessor.awaitClose(10, TimeUnit.SECONDS) == false) { + logger.warn("failed to properly close watcher bulk processor"); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } } /** diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java index 7b77afb225e4b..3507bd4eb369f 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -320,11 +320,8 @@ record = createWatchRecord(record, ctx, e); // TODO log watch record in logger, when saving in history store failed, otherwise the info is gone! 
} } - try { - triggeredWatchStore.delete(ctx.id()); - } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to delete triggered watch [{}]", ctx.id()), e); - } + + triggeredWatchStore.delete(ctx.id()); } currentExecutions.get().remove(watchId); logger.debug("finished [{}]/[{}]", watchId, ctx.id()); @@ -412,14 +409,8 @@ private void executeAsync(WatchExecutionContext ctx, final TriggeredWatch trigge triggeredWatch.id()), exc); } - try { - triggeredWatchStore.delete(triggeredWatch.id()); - } catch (Exception exc) { - logger.error((Supplier) () -> - new ParameterizedMessage("Error deleting triggered watch store record for watch [{}] after thread pool " + - "rejection", triggeredWatch.id()), exc); - } - }; + triggeredWatchStore.delete(triggeredWatch.id()); + } } WatchRecord executeInner(WatchExecutionContext ctx) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java index e0164b5bdbd54..9a4b555d63355 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -24,7 +25,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -32,6 +32,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.execution.Wid; import org.elasticsearch.xpack.core.watcher.watch.Watch; @@ -46,8 +47,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; public class TriggeredWatchStore extends AbstractComponent { @@ -58,21 +57,17 @@ public class TriggeredWatchStore extends AbstractComponent { private final TimeValue defaultBulkTimeout; private final TimeValue defaultSearchTimeout; + private final BulkProcessor bulkProcessor; - public TriggeredWatchStore(Settings settings, Client client, TriggeredWatch.Parser triggeredWatchParser) { + public TriggeredWatchStore(Settings settings, Client client, TriggeredWatch.Parser triggeredWatchParser, BulkProcessor bulkProcessor) { super(settings); this.scrollSize = settings.getAsInt("xpack.watcher.execution.scroll.size", 1000); - this.client = client; + this.client = ClientHelper.clientWithOrigin(client, 
WATCHER_ORIGIN);
         this.scrollTimeout = settings.getAsTime("xpack.watcher.execution.scroll.timeout", TimeValue.timeValueMinutes(5));
         this.defaultBulkTimeout = settings.getAsTime("xpack.watcher.internal.ops.bulk.default_timeout", TimeValue.timeValueSeconds(120));
         this.defaultSearchTimeout = settings.getAsTime("xpack.watcher.internal.ops.search.default_timeout", TimeValue.timeValueSeconds(30));
         this.triggeredWatchParser = triggeredWatchParser;
-    }
-
-    public static boolean validate(ClusterState state) {
-        IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, state.metaData());
-        return indexMetaData == null || (indexMetaData.getState() == IndexMetaData.State.OPEN &&
-            state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive());
+        this.bulkProcessor = bulkProcessor;
     }

     public void putAll(final List<TriggeredWatch> triggeredWatches, final ActionListener<BulkResponse> listener) throws IOException {
@@ -81,8 +76,7 @@ public void putAll(final List triggeredWatches, final ActionList
             return;
         }

-        executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, createBulkRequest(triggeredWatches,
-            TriggeredWatchStoreField.DOC_TYPE), listener, client::bulk);
+        client.bulk(createBulkRequest(triggeredWatches), listener);
     }

     public BulkResponse putAll(final List<TriggeredWatch> triggeredWatches) throws IOException {
@@ -94,14 +88,14 @@ public BulkResponse putAll(final List triggeredWatches) throws I
     /**
      * Create a bulk request from the triggered watches with a specified document type
      * @param triggeredWatches The list of triggered watches
-     * @param docType The document type to use, either the current one or legacy
      * @return The bulk request for the triggered watches
      * @throws IOException If a triggered watch could not be parsed to JSON, this exception is thrown
      */
-    private BulkRequest createBulkRequest(final List<TriggeredWatch> triggeredWatches, String docType) throws IOException {
+    private BulkRequest createBulkRequest(final List<TriggeredWatch> triggeredWatches) throws IOException {
         BulkRequest request = new BulkRequest();
         for (TriggeredWatch triggeredWatch : triggeredWatches) {
-            IndexRequest indexRequest = new IndexRequest(TriggeredWatchStoreField.INDEX_NAME, docType, triggeredWatch.id().value());
+            IndexRequest indexRequest = new IndexRequest(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE,
+                triggeredWatch.id().value());
             try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                 triggeredWatch.toXContent(builder, ToXContent.EMPTY_PARAMS);
                 indexRequest.source(builder);
@@ -112,12 +106,15 @@ private BulkRequest createBulkRequest(final List triggeredWatche
         return request;
     }

+    /**
+     * Delete a triggered watch entry.
+     * Note that this happens asynchronously, as these kinds of requests are batched together to reduce the amount of concurrent requests.
+     *
+     * @param wid The ID of the triggered watch
+     */
     public void delete(Wid wid) {
         DeleteRequest request = new DeleteRequest(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, wid.value());
-        try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
-            client.delete(request); // FIXME shouldn't we wait before saying the delete was successful
-        }
-        logger.trace("successfully deleted triggered watch with id [{}]", wid);
+        bulkProcessor.add(request);
     }

     /**
@@ -140,9 +137,9 @@ public Collection findTriggeredWatches(Collection watches
             return Collections.emptyList();
         }

-        try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
-            client.admin().indices().refresh(new RefreshRequest(TriggeredWatchStoreField.INDEX_NAME))
-                .actionGet(TimeValue.timeValueSeconds(5));
+        try {
+            RefreshRequest request = new RefreshRequest(TriggeredWatchStoreField.INDEX_NAME);
+            client.admin().indices().refresh(request).actionGet(TimeValue.timeValueSeconds(5));
         } catch (IndexNotFoundException e) {
             return Collections.emptyList();
         }
@@ -159,7 +156,7 @@ public Collection findTriggeredWatches(Collection watches
                 .version(true));

         SearchResponse response = null;
-        try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
+        try {
             response = client.search(searchRequest).actionGet(defaultSearchTimeout);
             logger.debug("trying to find triggered watches for ids {}: found [{}] docs", ids, response.getHits().getTotalHits());
             while (response.getHits().getHits().length != 0) {
@@ -176,14 +173,18 @@ public Collection findTriggeredWatches(Collection watches
             }
         } finally {
             if (response != null) {
-                try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
-                    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
-                    clearScrollRequest.addScrollId(response.getScrollId());
-                    client.clearScroll(clearScrollRequest).actionGet(scrollTimeout);
-                }
+                ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
+                clearScrollRequest.addScrollId(response.getScrollId());
+                client.clearScroll(clearScrollRequest).actionGet(scrollTimeout);
             }
         }
         return triggeredWatches;
     }
+
+    public static boolean validate(ClusterState state) {
+        IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, state.metaData());
+        return indexMetaData == null || (indexMetaData.getState() == IndexMetaData.State.OPEN &&
+            state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive());
+    }
 }
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
index 64e909a2f73d8..723568f8ba75d 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java
@@ -7,17 +7,14 @@
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.component.AbstractComponent;
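With the origin now attached to the client once via `ClientHelper.clientWithOrigin`, the repeated `stashWithOrigin` try-with-resources blocks above disappear and `findTriggeredWatches` reduces to the standard search/scroll/clear-scroll loop. Stripped of the watcher specifics, that pattern is roughly the following sketch (the `ScrollSketch` class and `scrollAll` method are illustrative names, not code from this change):

```java
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;

final class ScrollSketch {

    // Page through every hit of `request`, then release the scroll context.
    static void scrollAll(Client client, SearchRequest request, TimeValue timeout) {
        request.scroll(timeout); // open a scroll context so getScrollId() is set
        SearchResponse response = client.search(request).actionGet(timeout);
        try {
            while (response.getHits().getHits().length != 0) {
                for (SearchHit hit : response.getHits()) {
                    // parse each hit, as TriggeredWatch.Parser does above
                }
                SearchScrollRequest scroll = new SearchScrollRequest(response.getScrollId());
                scroll.scroll(timeout);
                response = client.searchScroll(scroll).actionGet(timeout);
            }
        } finally {
            // always clear the scroll, even if handling a page failed
            ClearScrollRequest clear = new ClearScrollRequest();
            clear.addScrollId(response.getScrollId());
            client.clearScroll(clear).actionGet(timeout);
        }
    }
}
```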
import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; @@ -26,37 +23,18 @@ import org.joda.time.DateTimeZone; import java.io.IOException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.watcher.support.Exceptions.ioException; -public class HistoryStore extends AbstractComponent implements AutoCloseable { +public class HistoryStore extends AbstractComponent { public static final String DOC_TYPE = "doc"; - private final Client client; + private final BulkProcessor bulkProcessor; - private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - private final Lock putUpdateLock = readWriteLock.readLock(); - private final Lock stopLock = readWriteLock.writeLock(); - - public HistoryStore(Settings settings, Client client) { + public HistoryStore(Settings settings, BulkProcessor bulkProcessor) { super(settings); - this.client = client; - } - - @Override - public void close() { - // This will block while put or update actions are underway - stopLock.lock(); - stopLock.unlock(); + this.bulkProcessor = bulkProcessor; } /** @@ -65,20 +43,14 @@ public void close() { */ public void put(WatchRecord watchRecord) throws Exception { String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); - putUpdateLock.lock(); - try (XContentBuilder builder = XContentFactory.jsonBuilder(); - ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); - IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) - .source(builder) - .opType(IndexRequest.OpType.CREATE); - client.index(request).actionGet(30, TimeUnit.SECONDS); - logger.debug("indexed watch history record [{}]", watchRecord.id().value()); + IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()).source(builder); + request.opType(IndexRequest.OpType.CREATE); + bulkProcessor.add(request); } catch (IOException ioe) { throw ioException("failed to persist watch record [{}]", ioe, watchRecord); - } finally { - putUpdateLock.unlock(); } } @@ -88,33 +60,14 @@ public void put(WatchRecord watchRecord) throws Exception { */ public void forcePut(WatchRecord watchRecord) { String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); - putUpdateLock.lock(); - try { - try (XContentBuilder builder = XContentFactory.jsonBuilder(); - ThreadContext.StoredContext ignore = 
stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); - IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) - .source(builder) - .opType(IndexRequest.OpType.CREATE); - client.index(request).get(30, TimeUnit.SECONDS); - logger.debug("indexed watch history record [{}]", watchRecord.id().value()); - } catch (VersionConflictEngineException vcee) { - watchRecord = new WatchRecord.MessageWatchRecord(watchRecord, ExecutionState.EXECUTED_MULTIPLE_TIMES, - "watch record [{ " + watchRecord.id() + " }] has been stored before, previous state [" + watchRecord.state() + "]"); - try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(); - ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { - IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) - .source(xContentBuilder.value(watchRecord)); - client.index(request).get(30, TimeUnit.SECONDS); - } - logger.debug("overwrote watch history record [{}]", watchRecord.id().value()); - } - } catch (InterruptedException | ExecutionException | TimeoutException | IOException ioe) { + IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()).source(builder); + bulkProcessor.add(request); + } catch (IOException ioe) { final WatchRecord wr = watchRecord; logger.error((Supplier) () -> new ParameterizedMessage("failed to persist watch record [{}]", wr), ioe); - } finally { - putUpdateLock.unlock(); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java index 8af00ae8f8169..c33e788b61451 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java @@ -88,7 +88,6 @@ public SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy) { sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, response)); } catch (Exception e) { - logger.error("failed to execute hipchat api http request", e); sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); } return new SentMessages(name, sentMessages); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index f38f4ad6a86e8..4012c8d24b5b5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -7,15 +7,23 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.bulk.BulkAction; +import 
org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; -import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -65,6 +73,9 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import static java.util.Collections.singleton; import static org.hamcrest.Matchers.equalTo; @@ -79,7 +90,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; public class TriggeredWatchStoreTests extends ESTestCase { @@ -92,15 +102,34 @@ public class TriggeredWatchStoreTests extends ESTestCase { private Client client; private TriggeredWatch.Parser parser; private TriggeredWatchStore triggeredWatchStore; + private final Map bulks = new LinkedHashMap<>(); + private BulkProcessor.Listener listener = new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + bulks.put(request, response); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + throw new ElasticsearchException(failure); + } + }; @Before public void init() { + Settings settings = Settings.builder().put("node.name", randomAlphaOfLength(10)).build(); client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(settings); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); parser = mock(TriggeredWatch.Parser.class); - triggeredWatchStore = new TriggeredWatchStore(Settings.EMPTY, client, parser); + BulkProcessor bulkProcessor = BulkProcessor.builder(client, listener).setConcurrentRequests(0).setBulkActions(1).build(); + triggeredWatchStore = new TriggeredWatchStore(settings, client, parser, bulkProcessor); } public void testFindTriggeredWatchesEmptyCollection() { @@ -174,14 +203,11 @@ public void testFindTriggeredWatchesGoodCase() { csBuilder.routingTable(routingTableBuilder.build()); ClusterState cs = csBuilder.build(); - RefreshResponse refreshResponse = mockRefreshResponse(1, 1); - AdminClient adminClient = mock(AdminClient.class); - when(client.admin()).thenReturn(adminClient); - IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); - when(adminClient.indices()).thenReturn(indicesAdminClient); - PlainActionFuture future = 
PlainActionFuture.newFuture(); - when(indicesAdminClient.refresh(any())).thenReturn(future); - future.onResponse(refreshResponse); + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onResponse(mockRefreshResponse(1, 1)); + return null; + }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); SearchResponse searchResponse1 = mock(SearchResponse.class); when(searchResponse1.getSuccessfulShards()).thenReturn(1); @@ -194,9 +220,11 @@ public void testFindTriggeredWatchesGoodCase() { SearchHits hits = new SearchHits(new SearchHit[]{hit}, 1, 1.0f); when(searchResponse1.getHits()).thenReturn(hits); when(searchResponse1.getScrollId()).thenReturn("_scrollId"); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); - when(client.search(any(SearchRequest.class))).thenReturn(searchFuture); - searchFuture.onResponse(searchResponse1); + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onResponse(searchResponse1); + return null; + }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); // First return a scroll response with a single hit and then with no hits hit = new SearchHit(0, "second_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); @@ -209,24 +237,27 @@ public void testFindTriggeredWatchesGoodCase() { SearchResponse searchResponse3 = new SearchResponse(InternalSearchResponse.empty(), "_scrollId2", 1, 1, 0, 1, null, null); doAnswer(invocation -> { - SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[0]; - PlainActionFuture searchScrollFuture = PlainActionFuture.newFuture(); + SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; if (request.scrollId().equals("_scrollId")) { - searchScrollFuture.onResponse(searchResponse2); + listener.onResponse(searchResponse2); } else if (request.scrollId().equals("_scrollId1")) { - searchScrollFuture.onResponse(searchResponse3); + listener.onResponse(searchResponse3); } else { - searchScrollFuture.onFailure(new ElasticsearchException("test issue")); + listener.onFailure(new ElasticsearchException("test issue")); } - return searchScrollFuture; - }).when(client).searchScroll(any()); + return null; + }).when(client).execute(eq(SearchScrollAction.INSTANCE), any(), any()); TriggeredWatch triggeredWatch = mock(TriggeredWatch.class); when(parser.parse(eq("_id"), eq(1L), any(BytesReference.class))).thenReturn(triggeredWatch); - PlainActionFuture clearScrollResponseFuture = PlainActionFuture.newFuture(); - when(client.clearScroll(any())).thenReturn(clearScrollResponseFuture); - clearScrollResponseFuture.onResponse(new ClearScrollResponse(true, 1)); + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onResponse(new ClearScrollResponse(true, 1)); + return null; + + }).when(client).execute(eq(ClearScrollAction.INSTANCE), any(), any()); assertThat(TriggeredWatchStore.validate(cs), is(true)); DateTime now = DateTime.now(UTC); @@ -251,10 +282,10 @@ public void testFindTriggeredWatchesGoodCase() { assertThat(triggeredWatches, notNullValue()); assertThat(triggeredWatches, hasSize(watches.size())); - verify(client.admin().indices(), times(1)).refresh(any()); - verify(client, times(1)).search(any(SearchRequest.class)); - verify(client, times(2)).searchScroll(any()); - verify(client, times(1)).clearScroll(any()); + verify(client, 
times(1)).execute(eq(RefreshAction.INSTANCE), any(), any()); + verify(client, times(1)).execute(eq(SearchAction.INSTANCE), any(), any()); + verify(client, times(2)).execute(eq(SearchScrollAction.INSTANCE), any(), any()); + verify(client, times(1)).execute(eq(ClearScrollAction.INSTANCE), any(), any()); } // the elasticsearch migration helper is doing reindex using aliases, so we have to @@ -332,7 +363,7 @@ public void testTriggeredWatchesIndexDoesNotExistOnStartup() { assertThat(TriggeredWatchStore.validate(cs), is(true)); Watch watch = mock(Watch.class); triggeredWatchStore.findTriggeredWatches(Collections.singletonList(watch), cs); - verifyZeroInteractions(client); + verify(client, times(0)).execute(any(), any(), any()); } public void testIndexNotFoundButInMetaData() { @@ -344,13 +375,11 @@ public void testIndexNotFoundButInMetaData() { ClusterState cs = csBuilder.build(); Watch watch = mock(Watch.class); - AdminClient adminClient = mock(AdminClient.class); - when(client.admin()).thenReturn(adminClient); - IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); - when(adminClient.indices()).thenReturn(indicesAdminClient); - PlainActionFuture future = PlainActionFuture.newFuture(); - when(indicesAdminClient.refresh(any())).thenReturn(future); - future.onFailure(new IndexNotFoundException(TriggeredWatchStoreField.INDEX_NAME)); + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onFailure(new IndexNotFoundException(TriggeredWatchStoreField.INDEX_NAME)); + return null; + }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(Collections.singletonList(watch), cs); assertThat(triggeredWatches, hasSize(0)); @@ -381,6 +410,65 @@ public void testTriggeredWatchParser() throws Exception { assertThat(BytesReference.bytes(jsonBuilder).utf8ToString(), equalTo(BytesReference.bytes(jsonBuilder2).utf8ToString())); } + public void testPutTriggeredWatches() throws Exception { + DateTime now = DateTime.now(UTC); + int numberOfTriggeredWatches = randomIntBetween(1, 100); + + List triggeredWatches = new ArrayList<>(numberOfTriggeredWatches); + for (int i = 0; i < numberOfTriggeredWatches; i++) { + triggeredWatches.add(new TriggeredWatch(new Wid("watch_id_", now), new ScheduleTriggerEvent("watch_id", now, now))); + } + + doAnswer(invocation -> { + BulkRequest bulkRequest = (BulkRequest) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + + int size = bulkRequest.requests().size(); + BulkItemResponse[] bulkItemResponse = new BulkItemResponse[size]; + for (int i = 0; i < size; i++) { + DocWriteRequest writeRequest = bulkRequest.requests().get(i); + ShardId shardId = new ShardId(TriggeredWatchStoreField.INDEX_NAME, "uuid", 0); + IndexResponse indexResponse = new IndexResponse(shardId, writeRequest.type(), writeRequest.id(), 1, 1, 1, true); + bulkItemResponse[i] = new BulkItemResponse(0, writeRequest.opType(), indexResponse); + } + + listener.onResponse(new BulkResponse(bulkItemResponse, 123)); + return null; + }).when(client).execute(eq(BulkAction.INSTANCE), any(), any()); + + + BulkResponse response = triggeredWatchStore.putAll(triggeredWatches); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems().length, is(numberOfTriggeredWatches)); + } + + public void testDeleteTriggeredWatches() throws Exception { + DateTime now = DateTime.now(UTC); + + doAnswer(invocation -> { + 
BulkRequest bulkRequest = (BulkRequest) invocation.getArguments()[0]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + + int size = bulkRequest.requests().size(); + BulkItemResponse[] bulkItemResponse = new BulkItemResponse[size]; + for (int i = 0; i < size; i++) { + DocWriteRequest writeRequest = bulkRequest.requests().get(i); + ShardId shardId = new ShardId(TriggeredWatchStoreField.INDEX_NAME, "uuid", 0); + IndexResponse indexResponse = new IndexResponse(shardId, writeRequest.type(), writeRequest.id(), 1, 1, 1, true); + bulkItemResponse[i] = new BulkItemResponse(0, writeRequest.opType(), indexResponse); + } + + listener.onResponse(new BulkResponse(bulkItemResponse, 123)); + return null; + }).when(client).bulk(any(), any()); + + triggeredWatchStore.delete(new Wid("watch_id_", now)); + assertThat(bulks.keySet(), hasSize(1)); + BulkResponse response = bulks.values().iterator().next(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems().length, is(1)); + } + private RefreshResponse mockRefreshResponse(int total, int successful) { RefreshResponse refreshResponse = mock(RefreshResponse.class); when(refreshResponse.getTotalShards()).thenReturn(total); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java index 8f1cce9305571..19bf1ba5a1fd3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java @@ -7,10 +7,14 @@ import org.apache.http.HttpStatus; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest.OpType; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -42,6 +46,7 @@ import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.core.IsEqual.equalTo; import static org.joda.time.DateTimeZone.UTC; @@ -58,11 +63,15 @@ public class HistoryStoreTests extends ESTestCase { @Before public void init() { + Settings settings = Settings.builder().put("node.name", randomAlphaOfLength(10)).build(); client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - historyStore = new HistoryStore(Settings.EMPTY, client); + when(client.settings()).thenReturn(settings); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(settings)); + BulkProcessor.Listener listener = mock(BulkProcessor.Listener.class); + BulkProcessor bulkProcessor = 
BulkProcessor.builder(client, listener).setConcurrentRequests(0).setBulkActions(1).build(); + historyStore = new HistoryStore(settings, bulkProcessor); } public void testPut() throws Exception { @@ -75,19 +84,21 @@ public void testPut() throws Exception { IndexResponse indexResponse = mock(IndexResponse.class); doAnswer(invocation -> { - IndexRequest request = (IndexRequest) invocation.getArguments()[0]; - PlainActionFuture indexFuture = PlainActionFuture.newFuture(); - if (request.id().equals(wid.value()) && request.type().equals(HistoryStore.DOC_TYPE) && request.opType() == OpType.CREATE - && request.index().equals(index)) { - indexFuture.onResponse(indexResponse); + BulkRequest request = (BulkRequest) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + + IndexRequest indexRequest = (IndexRequest) request.requests().get(0); + if (indexRequest.id().equals(wid.value()) && indexRequest.type().equals(HistoryStore.DOC_TYPE) && + indexRequest.opType() == OpType.CREATE && indexRequest.index().equals(index)) { + listener.onResponse(new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse(1, OpType.CREATE, indexResponse) }, 1)); } else { - indexFuture.onFailure(new ElasticsearchException("test issue")); + listener.onFailure(new ElasticsearchException("test issue")); } - return indexFuture; - }).when(client).index(any()); + return null; + }).when(client).bulk(any(), any()); historyStore.put(watchRecord); - verify(client).index(any()); + verify(client).bulk(any(), any()); } public void testIndexNameGeneration() { @@ -139,10 +150,15 @@ public void testStoreWithHideSecrets() throws Exception { } watchRecord.result().actionsResults().put(JiraAction.TYPE, result); - PlainActionFuture indexResponseFuture = PlainActionFuture.newFuture(); - indexResponseFuture.onResponse(mock(IndexResponse.class)); - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(IndexRequest.class); - when(client.index(requestCaptor.capture())).thenReturn(indexResponseFuture); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(BulkRequest.class); + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + + IndexResponse indexResponse = mock(IndexResponse.class); + listener.onResponse(new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse(1, OpType.CREATE, indexResponse) }, 1)); + return null; + }).when(client).bulk(requestCaptor.capture(), any()); + if (randomBoolean()) { historyStore.put(watchRecord); } else { @@ -150,7 +166,9 @@ public void testStoreWithHideSecrets() throws Exception { } assertThat(requestCaptor.getAllValues(), hasSize(1)); - String indexedJson = requestCaptor.getValue().source().utf8ToString(); + assertThat(requestCaptor.getValue().requests().get(0), instanceOf(IndexRequest.class)); + IndexRequest capturedIndexRequest = (IndexRequest) requestCaptor.getValue().requests().get(0); + String indexedJson = capturedIndexRequest.source().utf8ToString(); assertThat(indexedJson, containsString(username)); assertThat(indexedJson, not(containsString(password))); } From 87cedef3cfca9b8a03100f7de4a3c1d40ac260d9 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 18 Sep 2018 10:29:02 +0200 Subject: [PATCH 11/46] NETWORKING:Def CName in Http Publish Addr to True (#33631) * Follow up to #32806 setting the setting to true for 7.x --- .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 3 --- docs/build.gradle | 2 -- modules/lang-painless/build.gradle | 1 - 
server/src/main/java/org/elasticsearch/http/HttpInfo.java | 2 +-
 4 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 06c0827f1ff27..0553f9395958d 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -828,9 +828,6 @@ class BuildPlugin implements Plugin<Project> {
             // TODO: remove this once ctx isn't added to update script params in 7.0
             systemProperty 'es.scripting.update.ctx_in_params', 'false'

-            //TODO: remove this once the cname is prepended to the address by default in 7.0
-            systemProperty 'es.http.cname_in_publish_address', 'true'
-
             // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
             if (project.inFipsJvm) {
                 systemProperty 'javax.net.ssl.trustStorePassword', 'password'
diff --git a/docs/build.gradle b/docs/build.gradle
index aa075d05cd5dd..864567ba8358a 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -56,8 +56,6 @@ integTestCluster {
   // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults
   systemProperty 'es.scripting.update.ctx_in_params', 'false'

-  //TODO: remove this once the cname is prepended to the address by default in 7.0
-  systemProperty 'es.http.cname_in_publish_address', 'true'
 }

 // remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed
diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle
index 6f68c667fe63d..b3cab595201a5 100644
--- a/modules/lang-painless/build.gradle
+++ b/modules/lang-painless/build.gradle
@@ -25,7 +25,6 @@ esplugin {
 integTestCluster {
   module project.project(':modules:mapper-extras')
   systemProperty 'es.scripting.update.ctx_in_params', 'false'
-  systemProperty 'es.http.cname_in_publish_address', 'true'
 }

 dependencies {
diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java
index aece813199479..22bcd31850d29 100644
--- a/server/src/main/java/org/elasticsearch/http/HttpInfo.java
+++ b/server/src/main/java/org/elasticsearch/http/HttpInfo.java
@@ -41,7 +41,7 @@ public class HttpInfo implements Writeable, ToXContentFragment {

     /** Whether to add hostname to publish host field when serializing. */
     private static final boolean CNAME_IN_PUBLISH_HOST =
-        parseBoolean(System.getProperty("es.http.cname_in_publish_address"), false);
+        parseBoolean(System.getProperty("es.http.cname_in_publish_address"), true);

     private final BoundTransportAddress address;
     private final long maxContentLength;

From ab9c28a2b11142f86d6ca2c3bb52ab6bd5d39558 Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Tue, 18 Sep 2018 11:44:52 +0300
Subject: [PATCH 12/46] SQL: Grammar tweak for number declarations (#33767)

Consider plus and minus as part of a number declaration (to avoid the
minus being treated as a negation).
Close #33765 --- x-pack/plugin/sql/src/main/antlr/SqlBase.g4 | 4 +- .../xpack/sql/parser/SqlBaseParser.java | 249 +++++++++--------- .../xpack/sql/parser/ExpressionTests.java | 44 +++- 3 files changed, 157 insertions(+), 140 deletions(-) diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 index 9af2bd6a01129..396cc70920aeb 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -305,8 +305,8 @@ unquoteIdentifier ; number - : DECIMAL_VALUE #decimalLiteral - | INTEGER_VALUE #integerLiteral + : (PLUS | MINUS)? DECIMAL_VALUE #decimalLiteral + | (PLUS | MINUS)? INTEGER_VALUE #integerLiteral ; string diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java index ebf5b0cb09d76..164eacd402bf7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -3729,54 +3729,9 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti enterOuterAlt(_localctx, 1); { setState(548); - switch (_input.LA(1)) { - case T__0: - case ANALYZE: - case ANALYZED: - case CAST: - case CATALOGS: - case COLUMNS: - case DEBUG: - case EXECUTABLE: - case EXPLAIN: - case EXTRACT: - case FALSE: - case FORMAT: - case FUNCTIONS: - case GRAPHVIZ: - case LEFT: - case MAPPED: - case NULL: - case OPTIMIZED: - case PARSED: - case PHYSICAL: - case PLAN: - case RIGHT: - case RLIKE: - case QUERY: - case SCHEMAS: - case SHOW: - case SYS: - case TABLES: - case TEXT: - case TRUE: - case TYPE: - case TYPES: - case VERIFY: - case FUNCTION_ESC: - case DATE_ESC: - case TIME_ESC: - case TIMESTAMP_ESC: - case GUID_ESC: - case ASTERISK: - case PARAM: - case STRING: - case INTEGER_VALUE: - case DECIMAL_VALUE: - case IDENTIFIER: - case DIGIT_IDENTIFIER: - case QUOTED_IDENTIFIER: - case BACKQUOTED_IDENTIFIER: + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,74,_ctx) ) { + case 1: { _localctx = new ValueExpressionDefaultContext(_localctx); _ctx = _localctx; @@ -3786,8 +3741,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti primaryExpression(); } break; - case PLUS: - case MINUS: + case 2: { _localctx = new ArithmeticUnaryContext(_localctx); _ctx = _localctx; @@ -3804,8 +3758,6 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti valueExpression(4); } break; - default: - throw new NoViableAltException(this); } _ctx.stop = _input.LT(-1); setState(562); @@ -4925,6 +4877,8 @@ public final ConstantContext constant() throws RecognitionException { match(NULL); } break; + case PLUS: + case MINUS: case INTEGER_VALUE: case DECIMAL_VALUE: _localctx = new NumericLiteralContext(_localctx); @@ -5654,6 +5608,8 @@ public void copyFrom(NumberContext ctx) { } public static class DecimalLiteralContext extends NumberContext { public TerminalNode DECIMAL_VALUE() { return getToken(SqlBaseParser.DECIMAL_VALUE, 0); } + public TerminalNode PLUS() { return getToken(SqlBaseParser.PLUS, 0); } + public TerminalNode MINUS() { return getToken(SqlBaseParser.MINUS, 0); } public DecimalLiteralContext(NumberContext ctx) { copyFrom(ctx); } @Override public void enterRule(ParseTreeListener listener) { @@ -5671,6 +5627,8 @@ public T accept(ParseTreeVisitor visitor) { } public static class 
IntegerLiteralContext extends NumberContext { public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); } + public TerminalNode PLUS() { return getToken(SqlBaseParser.PLUS, 0); } + public TerminalNode MINUS() { return getToken(SqlBaseParser.MINUS, 0); } public IntegerLiteralContext(NumberContext ctx) { copyFrom(ctx); } @Override public void enterRule(ParseTreeListener listener) { @@ -5690,27 +5648,55 @@ public T accept(ParseTreeVisitor visitor) { public final NumberContext number() throws RecognitionException { NumberContext _localctx = new NumberContext(_ctx, getState()); enterRule(_localctx, 92, RULE_number); + int _la; try { - setState(717); - switch (_input.LA(1)) { - case DECIMAL_VALUE: + setState(723); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,97,_ctx) ) { + case 1: _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(715); + setState(716); + _la = _input.LA(1); + if (_la==PLUS || _la==MINUS) { + { + setState(715); + _la = _input.LA(1); + if ( !(_la==PLUS || _la==MINUS) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + + setState(718); match(DECIMAL_VALUE); } break; - case INTEGER_VALUE: + case 2: _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(716); + setState(720); + _la = _input.LA(1); + if (_la==PLUS || _la==MINUS) { + { + setState(719); + _la = _input.LA(1); + if ( !(_la==PLUS || _la==MINUS) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + + setState(722); match(INTEGER_VALUE); } break; - default: - throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -5753,7 +5739,7 @@ public final StringContext string() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(719); + setState(725); _la = _input.LA(1); if ( !(_la==PARAM || _la==STRING) ) { _errHandler.recoverInline(this); @@ -5825,7 +5811,7 @@ public final NonReservedContext nonReserved() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(721); + setState(727); _la = _input.LA(1); if ( !(((((_la - 6)) & ~0x3f) == 0 && ((1L << (_la - 6)) & ((1L << (ANALYZE - 6)) | (1L << (ANALYZED - 6)) | (1L << (CATALOGS - 6)) | (1L << (COLUMNS - 6)) | (1L << (DEBUG - 6)) | (1L << (EXECUTABLE - 6)) | (1L << (EXPLAIN - 6)) | (1L << (FORMAT - 6)) | (1L << (FUNCTIONS - 6)) | (1L << (GRAPHVIZ - 6)) | (1L << (MAPPED - 6)) | (1L << (OPTIMIZED - 6)) | (1L << (PARSED - 6)) | (1L << (PHYSICAL - 6)) | (1L << (PLAN - 6)) | (1L << (RLIKE - 6)) | (1L << (QUERY - 6)) | (1L << (SCHEMAS - 6)) | (1L << (SHOW - 6)) | (1L << (SYS - 6)) | (1L << (TABLES - 6)) | (1L << (TEXT - 6)) | (1L << (TYPE - 6)) | (1L << (TYPES - 6)) | (1L << (VERIFY - 6)))) != 0)) ) { _errHandler.recoverInline(this); @@ -5876,7 +5862,7 @@ private boolean valueExpression_sempred(ValueExpressionContext _localctx, int pr } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3l\u02d6\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3l\u02dc\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -5930,62 +5916,63 @@ private boolean valueExpression_sempred(ValueExpressionContext _localctx, int pr "\u028d\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'"+ 
"\3\'\5\'\u02a1\n\'\3(\3(\3)\3)\3*\3*\3+\3+\3+\7+\u02ac\n+\f+\16+\u02af"+ "\13+\3+\3+\3,\3,\5,\u02b5\n,\3-\3-\3-\5-\u02ba\n-\3-\3-\3-\3-\5-\u02c0"+ - "\n-\3-\5-\u02c3\n-\3.\3.\5.\u02c7\n.\3/\3/\3/\5/\u02cc\n/\3\60\3\60\5"+ - "\60\u02d0\n\60\3\61\3\61\3\62\3\62\3\62\2\4.:\63\2\4\6\b\n\f\16\20\22"+ - "\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`b\2\20\b"+ - "\2\7\7\t\t\31\31,,\62\62\66\66\4\2\"\"BB\4\2\t\t\62\62\4\2\37\37%%\3\2"+ - "\25\26\4\2\7\7aa\4\2\r\r\25\25\4\2\7\7\27\27\3\2XY\3\2Z\\\3\2RW\4\2\35"+ - "\35CC\3\2_`\20\2\b\t\22\24\31\31\33\33\36\36!\",,\62\62\668:<>?ABDEGG"+ - "\u0330\2d\3\2\2\2\4g\3\2\2\2\6\u00ce\3\2\2\2\b\u00d9\3\2\2\2\n\u00dd\3"+ - "\2\2\2\f\u00f2\3\2\2\2\16\u00f9\3\2\2\2\20\u00fb\3\2\2\2\22\u00ff\3\2"+ - "\2\2\24\u011b\3\2\2\2\26\u0125\3\2\2\2\30\u012f\3\2\2\2\32\u013e\3\2\2"+ - "\2\34\u0140\3\2\2\2\36\u0146\3\2\2\2 \u0148\3\2\2\2\"\u014f\3\2\2\2$\u0161"+ - "\3\2\2\2&\u0172\3\2\2\2(\u0182\3\2\2\2*\u019d\3\2\2\2,\u019f\3\2\2\2."+ - "\u01d2\3\2\2\2\60\u01df\3\2\2\2\62\u0211\3\2\2\2\64\u0213\3\2\2\2\66\u0216"+ - "\3\2\2\28\u0220\3\2\2\2:\u0226\3\2\2\2<\u024c\3\2\2\2>\u0253\3\2\2\2@"+ - "\u0255\3\2\2\2B\u0261\3\2\2\2D\u0263\3\2\2\2F\u026f\3\2\2\2H\u0271\3\2"+ - "\2\2J\u0285\3\2\2\2L\u02a0\3\2\2\2N\u02a2\3\2\2\2P\u02a4\3\2\2\2R\u02a6"+ - "\3\2\2\2T\u02ad\3\2\2\2V\u02b4\3\2\2\2X\u02c2\3\2\2\2Z\u02c6\3\2\2\2\\"+ - "\u02cb\3\2\2\2^\u02cf\3\2\2\2`\u02d1\3\2\2\2b\u02d3\3\2\2\2de\5\6\4\2"+ - "ef\7\2\2\3f\3\3\2\2\2gh\5,\27\2hi\7\2\2\3i\5\3\2\2\2j\u00cf\5\b\5\2ky"+ - "\7\33\2\2lu\7\3\2\2mn\78\2\2nt\t\2\2\2op\7\36\2\2pt\t\3\2\2qr\7G\2\2r"+ - "t\5P)\2sm\3\2\2\2so\3\2\2\2sq\3\2\2\2tw\3\2\2\2us\3\2\2\2uv\3\2\2\2vx"+ - "\3\2\2\2wu\3\2\2\2xz\7\4\2\2yl\3\2\2\2yz\3\2\2\2z{\3\2\2\2{\u00cf\5\6"+ - "\4\2|\u0088\7\24\2\2}\u0084\7\3\2\2~\177\78\2\2\177\u0083\t\4\2\2\u0080"+ - "\u0081\7\36\2\2\u0081\u0083\t\3\2\2\u0082~\3\2\2\2\u0082\u0080\3\2\2\2"+ - "\u0083\u0086\3\2\2\2\u0084\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u0087"+ - "\3\2\2\2\u0086\u0084\3\2\2\2\u0087\u0089\7\4\2\2\u0088}\3\2\2\2\u0088"+ - "\u0089\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u00cf\5\6\4\2\u008b\u008c\7>"+ - "\2\2\u008c\u008f\7A\2\2\u008d\u0090\5\64\33\2\u008e\u0090\5X-\2\u008f"+ - "\u008d\3\2\2\2\u008f\u008e\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u00cf\3\2"+ - "\2\2\u0091\u0092\7>\2\2\u0092\u0093\7\23\2\2\u0093\u0096\t\5\2\2\u0094"+ - "\u0097\5\64\33\2\u0095\u0097\5X-\2\u0096\u0094\3\2\2\2\u0096\u0095\3\2"+ - "\2\2\u0097\u00cf\3\2\2\2\u0098\u009b\t\6\2\2\u0099\u009c\5\64\33\2\u009a"+ - "\u009c\5X-\2\u009b\u0099\3\2\2\2\u009b\u009a\3\2\2\2\u009c\u00cf\3\2\2"+ - "\2\u009d\u009e\7>\2\2\u009e\u00a0\7!\2\2\u009f\u00a1\5\64\33\2\u00a0\u009f"+ - "\3\2\2\2\u00a0\u00a1\3\2\2\2\u00a1\u00cf\3\2\2\2\u00a2\u00a3\7>\2\2\u00a3"+ - "\u00cf\7<\2\2\u00a4\u00a5\7?\2\2\u00a5\u00cf\7\22\2\2\u00a6\u00a7\7?\2"+ - "\2\u00a7\u00aa\7A\2\2\u00a8\u00a9\7\21\2\2\u00a9\u00ab\5\64\33\2\u00aa"+ - "\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00ae\3\2\2\2\u00ac\u00af\5\64"+ - "\33\2\u00ad\u00af\5X-\2\u00ae\u00ac\3\2\2\2\u00ae\u00ad\3\2\2\2\u00ae"+ - "\u00af\3\2\2\2\u00af\u00b9\3\2\2\2\u00b0\u00b1\7D\2\2\u00b1\u00b6\5`\61"+ - "\2\u00b2\u00b3\7\5\2\2\u00b3\u00b5\5`\61\2\u00b4\u00b2\3\2\2\2\u00b5\u00b8"+ - "\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8"+ - "\u00b6\3\2\2\2\u00b9\u00b0\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00cf\3\2"+ - "\2\2\u00bb\u00bc\7?\2\2\u00bc\u00bf\7\23\2\2\u00bd\u00be\7\21\2\2\u00be"+ - "\u00c0\5`\61\2\u00bf\u00bd\3\2\2\2\u00bf\u00c0\3\2\2\2\u00c0\u00c4\3\2"+ - 
"\2\2\u00c1\u00c2\7@\2\2\u00c2\u00c5\5\64\33\2\u00c3\u00c5\5X-\2\u00c4"+ - "\u00c1\3\2\2\2\u00c4\u00c3\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5\u00c7\3\2"+ - "\2\2\u00c6\u00c8\5\64\33\2\u00c7\u00c6\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8"+ - "\u00cf\3\2\2\2\u00c9\u00ca\7?\2\2\u00ca\u00cf\7E\2\2\u00cb\u00cc\7?\2"+ - "\2\u00cc\u00cd\7@\2\2\u00cd\u00cf\7E\2\2\u00cej\3\2\2\2\u00cek\3\2\2\2"+ - "\u00ce|\3\2\2\2\u00ce\u008b\3\2\2\2\u00ce\u0091\3\2\2\2\u00ce\u0098\3"+ - "\2\2\2\u00ce\u009d\3\2\2\2\u00ce\u00a2\3\2\2\2\u00ce\u00a4\3\2\2\2\u00ce"+ - "\u00a6\3\2\2\2\u00ce\u00bb\3\2\2\2\u00ce\u00c9\3\2\2\2\u00ce\u00cb\3\2"+ - "\2\2\u00cf\7\3\2\2\2\u00d0\u00d1\7I\2\2\u00d1\u00d6\5\34\17\2\u00d2\u00d3"+ - "\7\5\2\2\u00d3\u00d5\5\34\17\2\u00d4\u00d2\3\2\2\2\u00d5\u00d8\3\2\2\2"+ - "\u00d6\u00d4\3\2\2\2\u00d6\u00d7\3\2\2\2\u00d7\u00da\3\2\2\2\u00d8\u00d6"+ - "\3\2\2\2\u00d9\u00d0\3\2\2\2\u00d9\u00da\3\2\2\2\u00da\u00db\3\2\2\2\u00db"+ + "\n-\3-\5-\u02c3\n-\3.\3.\5.\u02c7\n.\3/\3/\3/\5/\u02cc\n/\3\60\5\60\u02cf"+ + "\n\60\3\60\3\60\5\60\u02d3\n\60\3\60\5\60\u02d6\n\60\3\61\3\61\3\62\3"+ + "\62\3\62\2\4.:\63\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62"+ + "\64\668:<>@BDFHJLNPRTVXZ\\^`b\2\20\b\2\7\7\t\t\31\31,,\62\62\66\66\4\2"+ + "\"\"BB\4\2\t\t\62\62\4\2\37\37%%\3\2\25\26\4\2\7\7aa\4\2\r\r\25\25\4\2"+ + "\7\7\27\27\3\2XY\3\2Z\\\3\2RW\4\2\35\35CC\3\2_`\20\2\b\t\22\24\31\31\33"+ + "\33\36\36!\",,\62\62\668:<>?ABDEGG\u0338\2d\3\2\2\2\4g\3\2\2\2\6\u00ce"+ + "\3\2\2\2\b\u00d9\3\2\2\2\n\u00dd\3\2\2\2\f\u00f2\3\2\2\2\16\u00f9\3\2"+ + "\2\2\20\u00fb\3\2\2\2\22\u00ff\3\2\2\2\24\u011b\3\2\2\2\26\u0125\3\2\2"+ + "\2\30\u012f\3\2\2\2\32\u013e\3\2\2\2\34\u0140\3\2\2\2\36\u0146\3\2\2\2"+ + " \u0148\3\2\2\2\"\u014f\3\2\2\2$\u0161\3\2\2\2&\u0172\3\2\2\2(\u0182\3"+ + "\2\2\2*\u019d\3\2\2\2,\u019f\3\2\2\2.\u01d2\3\2\2\2\60\u01df\3\2\2\2\62"+ + "\u0211\3\2\2\2\64\u0213\3\2\2\2\66\u0216\3\2\2\28\u0220\3\2\2\2:\u0226"+ + "\3\2\2\2<\u024c\3\2\2\2>\u0253\3\2\2\2@\u0255\3\2\2\2B\u0261\3\2\2\2D"+ + "\u0263\3\2\2\2F\u026f\3\2\2\2H\u0271\3\2\2\2J\u0285\3\2\2\2L\u02a0\3\2"+ + "\2\2N\u02a2\3\2\2\2P\u02a4\3\2\2\2R\u02a6\3\2\2\2T\u02ad\3\2\2\2V\u02b4"+ + "\3\2\2\2X\u02c2\3\2\2\2Z\u02c6\3\2\2\2\\\u02cb\3\2\2\2^\u02d5\3\2\2\2"+ + "`\u02d7\3\2\2\2b\u02d9\3\2\2\2de\5\6\4\2ef\7\2\2\3f\3\3\2\2\2gh\5,\27"+ + "\2hi\7\2\2\3i\5\3\2\2\2j\u00cf\5\b\5\2ky\7\33\2\2lu\7\3\2\2mn\78\2\2n"+ + "t\t\2\2\2op\7\36\2\2pt\t\3\2\2qr\7G\2\2rt\5P)\2sm\3\2\2\2so\3\2\2\2sq"+ + "\3\2\2\2tw\3\2\2\2us\3\2\2\2uv\3\2\2\2vx\3\2\2\2wu\3\2\2\2xz\7\4\2\2y"+ + "l\3\2\2\2yz\3\2\2\2z{\3\2\2\2{\u00cf\5\6\4\2|\u0088\7\24\2\2}\u0084\7"+ + "\3\2\2~\177\78\2\2\177\u0083\t\4\2\2\u0080\u0081\7\36\2\2\u0081\u0083"+ + "\t\3\2\2\u0082~\3\2\2\2\u0082\u0080\3\2\2\2\u0083\u0086\3\2\2\2\u0084"+ + "\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u0087\3\2\2\2\u0086\u0084\3\2"+ + "\2\2\u0087\u0089\7\4\2\2\u0088}\3\2\2\2\u0088\u0089\3\2\2\2\u0089\u008a"+ + "\3\2\2\2\u008a\u00cf\5\6\4\2\u008b\u008c\7>\2\2\u008c\u008f\7A\2\2\u008d"+ + "\u0090\5\64\33\2\u008e\u0090\5X-\2\u008f\u008d\3\2\2\2\u008f\u008e\3\2"+ + "\2\2\u008f\u0090\3\2\2\2\u0090\u00cf\3\2\2\2\u0091\u0092\7>\2\2\u0092"+ + "\u0093\7\23\2\2\u0093\u0096\t\5\2\2\u0094\u0097\5\64\33\2\u0095\u0097"+ + "\5X-\2\u0096\u0094\3\2\2\2\u0096\u0095\3\2\2\2\u0097\u00cf\3\2\2\2\u0098"+ + "\u009b\t\6\2\2\u0099\u009c\5\64\33\2\u009a\u009c\5X-\2\u009b\u0099\3\2"+ + "\2\2\u009b\u009a\3\2\2\2\u009c\u00cf\3\2\2\2\u009d\u009e\7>\2\2\u009e"+ + "\u00a0\7!\2\2\u009f\u00a1\5\64\33\2\u00a0\u009f\3\2\2\2\u00a0\u00a1\3"+ + 
"\2\2\2\u00a1\u00cf\3\2\2\2\u00a2\u00a3\7>\2\2\u00a3\u00cf\7<\2\2\u00a4"+ + "\u00a5\7?\2\2\u00a5\u00cf\7\22\2\2\u00a6\u00a7\7?\2\2\u00a7\u00aa\7A\2"+ + "\2\u00a8\u00a9\7\21\2\2\u00a9\u00ab\5\64\33\2\u00aa\u00a8\3\2\2\2\u00aa"+ + "\u00ab\3\2\2\2\u00ab\u00ae\3\2\2\2\u00ac\u00af\5\64\33\2\u00ad\u00af\5"+ + "X-\2\u00ae\u00ac\3\2\2\2\u00ae\u00ad\3\2\2\2\u00ae\u00af\3\2\2\2\u00af"+ + "\u00b9\3\2\2\2\u00b0\u00b1\7D\2\2\u00b1\u00b6\5`\61\2\u00b2\u00b3\7\5"+ + "\2\2\u00b3\u00b5\5`\61\2\u00b4\u00b2\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6"+ + "\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8\u00b6\3\2"+ + "\2\2\u00b9\u00b0\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00cf\3\2\2\2\u00bb"+ + "\u00bc\7?\2\2\u00bc\u00bf\7\23\2\2\u00bd\u00be\7\21\2\2\u00be\u00c0\5"+ + "`\61\2\u00bf\u00bd\3\2\2\2\u00bf\u00c0\3\2\2\2\u00c0\u00c4\3\2\2\2\u00c1"+ + "\u00c2\7@\2\2\u00c2\u00c5\5\64\33\2\u00c3\u00c5\5X-\2\u00c4\u00c1\3\2"+ + "\2\2\u00c4\u00c3\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5\u00c7\3\2\2\2\u00c6"+ + "\u00c8\5\64\33\2\u00c7\u00c6\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00cf\3"+ + "\2\2\2\u00c9\u00ca\7?\2\2\u00ca\u00cf\7E\2\2\u00cb\u00cc\7?\2\2\u00cc"+ + "\u00cd\7@\2\2\u00cd\u00cf\7E\2\2\u00cej\3\2\2\2\u00cek\3\2\2\2\u00ce|"+ + "\3\2\2\2\u00ce\u008b\3\2\2\2\u00ce\u0091\3\2\2\2\u00ce\u0098\3\2\2\2\u00ce"+ + "\u009d\3\2\2\2\u00ce\u00a2\3\2\2\2\u00ce\u00a4\3\2\2\2\u00ce\u00a6\3\2"+ + "\2\2\u00ce\u00bb\3\2\2\2\u00ce\u00c9\3\2\2\2\u00ce\u00cb\3\2\2\2\u00cf"+ + "\7\3\2\2\2\u00d0\u00d1\7I\2\2\u00d1\u00d6\5\34\17\2\u00d2\u00d3\7\5\2"+ + "\2\u00d3\u00d5\5\34\17\2\u00d4\u00d2\3\2\2\2\u00d5\u00d8\3\2\2\2\u00d6"+ + "\u00d4\3\2\2\2\u00d6\u00d7\3\2\2\2\u00d7\u00da\3\2\2\2\u00d8\u00d6\3\2"+ + "\2\2\u00d9\u00d0\3\2\2\2\u00d9\u00da\3\2\2\2\u00da\u00db\3\2\2\2\u00db"+ "\u00dc\5\n\6\2\u00dc\t\3\2\2\2\u00dd\u00e8\5\16\b\2\u00de\u00df\7\64\2"+ "\2\u00df\u00e0\7\17\2\2\u00e0\u00e5\5\20\t\2\u00e1\u00e2\7\5\2\2\u00e2"+ "\u00e4\5\20\t\2\u00e3\u00e1\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3"+ @@ -6152,17 +6139,19 @@ private boolean valueExpression_sempred(ValueExpressionContext _localctx, int pr "\7f\2\2\u02c5\u02c7\7g\2\2\u02c6\u02c4\3\2\2\2\u02c6\u02c5\3\2\2\2\u02c7"+ "[\3\2\2\2\u02c8\u02cc\7c\2\2\u02c9\u02cc\5b\62\2\u02ca\u02cc\7d\2\2\u02cb"+ "\u02c8\3\2\2\2\u02cb\u02c9\3\2\2\2\u02cb\u02ca\3\2\2\2\u02cc]\3\2\2\2"+ - "\u02cd\u02d0\7b\2\2\u02ce\u02d0\7a\2\2\u02cf\u02cd\3\2\2\2\u02cf\u02ce"+ - "\3\2\2\2\u02d0_\3\2\2\2\u02d1\u02d2\t\16\2\2\u02d2a\3\2\2\2\u02d3\u02d4"+ - "\t\17\2\2\u02d4c\3\2\2\2bsuy\u0082\u0084\u0088\u008f\u0096\u009b\u00a0"+ - "\u00aa\u00ae\u00b6\u00b9\u00bf\u00c4\u00c7\u00ce\u00d6\u00d9\u00e5\u00e8"+ - "\u00eb\u00f2\u00f9\u00fd\u0101\u0108\u010c\u0110\u0115\u0119\u0121\u0125"+ - "\u012c\u0137\u013a\u013e\u014a\u014d\u0153\u015a\u0161\u0164\u0168\u016c"+ - "\u0170\u0172\u017d\u0182\u0186\u0189\u018f\u0192\u0198\u019b\u019d\u01b0"+ - "\u01be\u01cc\u01d2\u01da\u01dc\u01e1\u01e4\u01ec\u01f5\u01fb\u0203\u0208"+ - "\u020e\u0211\u0218\u0220\u0226\u0232\u0234\u023e\u024c\u0253\u0261\u026f"+ - "\u0274\u027b\u027e\u0285\u028d\u02a0\u02ad\u02b4\u02b9\u02bf\u02c2\u02c6"+ - "\u02cb\u02cf"; + "\u02cd\u02cf\t\n\2\2\u02ce\u02cd\3\2\2\2\u02ce\u02cf\3\2\2\2\u02cf\u02d0"+ + "\3\2\2\2\u02d0\u02d6\7b\2\2\u02d1\u02d3\t\n\2\2\u02d2\u02d1\3\2\2\2\u02d2"+ + "\u02d3\3\2\2\2\u02d3\u02d4\3\2\2\2\u02d4\u02d6\7a\2\2\u02d5\u02ce\3\2"+ + "\2\2\u02d5\u02d2\3\2\2\2\u02d6_\3\2\2\2\u02d7\u02d8\t\16\2\2\u02d8a\3"+ + "\2\2\2\u02d9\u02da\t\17\2\2\u02dac\3\2\2\2dsuy\u0082\u0084\u0088\u008f"+ + "\u0096\u009b\u00a0\u00aa\u00ae\u00b6\u00b9\u00bf\u00c4\u00c7\u00ce\u00d6"+ + 
"\u00d9\u00e5\u00e8\u00eb\u00f2\u00f9\u00fd\u0101\u0108\u010c\u0110\u0115"+ + "\u0119\u0121\u0125\u012c\u0137\u013a\u013e\u014a\u014d\u0153\u015a\u0161"+ + "\u0164\u0168\u016c\u0170\u0172\u017d\u0182\u0186\u0189\u018f\u0192\u0198"+ + "\u019b\u019d\u01b0\u01be\u01cc\u01d2\u01da\u01dc\u01e1\u01e4\u01ec\u01f5"+ + "\u01fb\u0203\u0208\u020e\u0211\u0218\u0220\u0226\u0232\u0234\u023e\u024c"+ + "\u0253\u0261\u026f\u0274\u027b\u027e\u0285\u028d\u02a0\u02ad\u02b4\u02b9"+ + "\u02bf\u02c2\u02c6\u02cb\u02ce\u02d2\u02d5"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java index ecb5b83896eb2..004118e8cd2d9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; -import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Neg; import org.elasticsearch.xpack.sql.type.DataType; public class ExpressionTests extends ESTestCase { @@ -23,6 +22,30 @@ public void testTokenFunctionName() throws Exception { assertEquals("LEFT", uf.functionName()); } + public void testLiteralDouble() throws Exception { + Expression lt = parser.createExpression(String.valueOf(Double.MAX_VALUE)); + assertEquals(Literal.class, lt.getClass()); + Literal l = (Literal) lt; + assertEquals(Double.MAX_VALUE, l.value()); + assertEquals(DataType.DOUBLE, l.dataType()); + } + + public void testLiteralDoubleNegative() throws Exception { + Expression lt = parser.createExpression(String.valueOf(Double.MIN_VALUE)); + assertEquals(Literal.class, lt.getClass()); + Literal l = (Literal) lt; + assertEquals(Double.MIN_VALUE, l.value()); + assertEquals(DataType.DOUBLE, l.dataType()); + } + + public void testLiteralDoublePositive() throws Exception { + Expression lt = parser.createExpression("+" + Double.MAX_VALUE); + assertEquals(Literal.class, lt.getClass()); + Literal l = (Literal) lt; + assertEquals(Double.MAX_VALUE, l.value()); + assertEquals(DataType.DOUBLE, l.dataType()); + } + public void testLiteralLong() throws Exception { Expression lt = parser.createExpression(String.valueOf(Long.MAX_VALUE)); assertEquals(Literal.class, lt.getClass()); @@ -32,13 +55,18 @@ public void testLiteralLong() throws Exception { } public void testLiteralLongNegative() throws Exception { - // Long.MIN_VALUE doesn't work since it is being interpreted as negate positive.long which is 1 higher than Long.MAX_VALUE - Expression lt = parser.createExpression(String.valueOf(-Long.MAX_VALUE)); - assertEquals(Neg.class, lt.getClass()); - Neg n = (Neg) lt; - assertTrue(n.foldable()); - assertEquals(-Long.MAX_VALUE, n.fold()); - assertEquals(DataType.LONG, n.dataType()); + Expression lt = parser.createExpression(String.valueOf(Long.MIN_VALUE)); + assertTrue(lt.foldable()); + assertEquals(Long.MIN_VALUE, lt.fold()); + assertEquals(DataType.LONG, lt.dataType()); + } + + public void testLiteralLongPositive() throws Exception { + Expression lt = parser.createExpression("+" + String.valueOf(Long.MAX_VALUE)); + assertEquals(Literal.class, lt.getClass()); + Literal l = (Literal) lt; + 
assertEquals(Long.MAX_VALUE, l.value()); + assertEquals(DataType.LONG, l.dataType()); } public void testLiteralInteger() throws Exception { From 2fa09f062e5fe4165b457fb2b42ff4b65acdb8c7 Mon Sep 17 00:00:00 2001 From: markharwood Date: Tue, 18 Sep 2018 10:25:27 +0100 Subject: [PATCH 13/46] New plugin - Annotated_text field type (#30364) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New plugin for annotated_text field type. Largely a copy of `text` field type but adds ability to include markdown-like syntax in the text. The “AnnotatedText” class parses text+markup and converts into plain text and AnnotationTokens. The annotation token values are injected unchanged alongside the regular text tokens to provide a form of additional indexed overlay useful in positional searches and highlighting. Annotated_text fields do not support fielddata as we want to phase this out. Also includes a new "annotated" highlighter type that retains annotations and merges in search hits as additional annotation markup. Closes #29467 --- docs/plugins/mapper-annotated-text.asciidoc | 328 ++++++++ docs/plugins/mapper.asciidoc | 8 + docs/reference/cat/plugins.asciidoc | 1 + docs/reference/mapping/types.asciidoc | 1 + plugins/mapper-annotated-text/build.gradle | 23 + .../AnnotatedTextFieldMapper.java | 776 ++++++++++++++++++ .../plugin/mapper/AnnotatedTextPlugin.java | 44 + .../highlight/AnnotatedPassageFormatter.java | 201 +++++ .../highlight/AnnotatedTextHighlighter.java | 64 ++ .../AnnotatedTextClientYamlTestSuiteIT.java | 39 + .../AnnotatedTextFieldMapperTests.java | 681 +++++++++++++++ .../AnnotatedTextParsingTests.java | 73 ++ .../AnnotatedTextHighlighterTests.java | 185 +++++ .../test/mapper_annotatedtext/10_basic.yml | 44 + .../tests/module_and_plugin_test_cases.bash | 8 + .../subphase/highlight/HighlightUtils.java | 19 +- .../subphase/highlight/PlainHighlighter.java | 3 +- .../highlight/UnifiedHighlighter.java | 55 +- 18 files changed, 2523 insertions(+), 30 deletions(-) create mode 100644 docs/plugins/mapper-annotated-text.asciidoc create mode 100644 plugins/mapper-annotated-text/build.gradle create mode 100644 plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java create mode 100644 plugins/mapper-annotated-text/src/main/java/org/elasticsearch/plugin/mapper/AnnotatedTextPlugin.java create mode 100644 plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java create mode 100644 plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java create mode 100644 plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java create mode 100644 plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java create mode 100644 plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java create mode 100644 plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java create mode 100644 plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml diff --git a/docs/plugins/mapper-annotated-text.asciidoc b/docs/plugins/mapper-annotated-text.asciidoc new file mode 100644 index 0000000000000..4528168a4d643 --- 
/dev/null
+++ b/docs/plugins/mapper-annotated-text.asciidoc
@@ -0,0 +1,328 @@
+[[mapper-annotated-text]]
+=== Mapper Annotated Text Plugin
+
+experimental[]
+
+The mapper-annotated-text plugin provides the ability to index text that is a
+combination of free-text and special markup that is typically used to identify
+items of interest such as people or organisations (see NER or Named Entity Recognition
+tools).
+
+
+The Elasticsearch markup allows one or more additional tokens to be injected, unchanged, into the token
+stream at the same position as the underlying text it annotates.
+
+:plugin_name: mapper-annotated-text
+include::install_remove.asciidoc[]
+
+[[mapper-annotated-text-usage]]
+==== Using the `annotated-text` field
+
+The `annotated-text` field tokenizes text content as per the more common `text` field (see
+"limitations" below) but also injects any marked-up annotation tokens directly into
+the search index:
+
+[source,js]
+--------------------------
+PUT my_index
+{
+  "mappings": {
+    "_doc": {
+      "properties": {
+        "my_field": {
+          "type": "annotated_text"
+        }
+      }
+    }
+  }
+}
+--------------------------
+// CONSOLE
+
+Such a mapping would allow marked-up text, e.g. wikipedia articles, to be indexed as both text
+and structured tokens. The annotations use a markdown-like syntax using URL encoding of
+one or more values separated by the `&` symbol.
+
+
+We can use the "_analyze" api to test how an example annotation would be stored as tokens
+in the search index:
+
+
+[source,js]
+--------------------------
+GET my_index/_analyze
+{
+  "field": "my_field",
+  "text":"Investors in [Apple](Apple+Inc.) rejoiced."
+}
+--------------------------
+// NOTCONSOLE
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+  "tokens": [
+    {
+      "token": "investors",
+      "start_offset": 0,
+      "end_offset": 9,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "in",
+      "start_offset": 10,
+      "end_offset": 12,
+      "type": "<ALPHANUM>",
+      "position": 1
+    },
+    {
+      "token": "Apple Inc.", <1>
+      "start_offset": 13,
+      "end_offset": 18,
+      "type": "annotation",
+      "position": 2
+    },
+    {
+      "token": "apple",
+      "start_offset": 13,
+      "end_offset": 18,
+      "type": "<ALPHANUM>",
+      "position": 2
+    },
+    {
+      "token": "rejoiced",
+      "start_offset": 19,
+      "end_offset": 27,
+      "type": "<ALPHANUM>",
+      "position": 3
+    }
+  ]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+<1> Note the whole annotation token `Apple Inc.` is placed, unchanged, as a single token in
+the token stream and at the same position (position 2) as the text token (`apple`) it annotates.
+
+
+We can now perform searches for annotations using regular `term` queries that don't tokenize
+the provided search values. Annotations are a more precise way of matching, as can be seen
+in this example where a search for `Beck` will not match `Jeff Beck`:
+
+[source,js]
+--------------------------
+# Example documents
+PUT my_index/_doc/1
+{
+  "my_field": "[Beck](Beck) announced a new tour"<1>
+}
+
+PUT my_index/_doc/2
+{
+  "my_field": "[Jeff Beck](Jeff+Beck&Guitarist) plays a strat"<2>
+}
+
+# Example search
+GET my_index/_search
+{
+  "query": {
+    "term": {
+        "my_field": "Beck" <3>
+    }
+  }
+}
+--------------------------
+// CONSOLE
+
+<1> As well as tokenising the plain text into single words e.g. `beck`, here we
+inject the single token value `Beck` at the same position as `beck` in the token stream.
+<2> Note annotations can inject multiple tokens at the same position - here we inject both
+the very specific value `Jeff Beck` and the broader term `Guitarist`. This enables
+broader positional queries e.g. finding mentions of a `Guitarist` near to `strat`.
+<3> A benefit of searching with these carefully defined annotation tokens is that a query for
+`Beck` will not match document 2, which contains the tokens `jeff`, `beck` and `Jeff Beck`.
+
+WARNING: Any use of `=` signs in annotation values e.g. `[Prince](person=Prince)` will
+cause the document to be rejected with a parse failure. In future we hope to have a use for
+the equals signs, so we will actively reject documents that contain this today.
+
+
+[[mapper-annotated-text-tips]]
+==== Data modelling tips
+===== Use structured and unstructured fields
+
+Annotations are normally a way of weaving structured information into unstructured text for
+higher-precision search.
+
+`Entity resolution` is a form of document enrichment undertaken by specialist software or people
+where references to entities in a document are disambiguated by attaching a canonical ID.
+The ID is used to resolve any number of aliases or distinguish between people with the
+same name. The hyperlinks connecting Wikipedia's articles are a good example of resolved
+entity IDs woven into text.
+
+These IDs can be embedded as annotations in an annotated_text field but it often makes
+sense to include them in dedicated structured fields to support discovery via aggregations:
+
+[source,js]
+--------------------------
+PUT my_index
+{
+  "mappings": {
+    "_doc": {
+      "properties": {
+        "my_unstructured_text_field": {
+          "type": "annotated_text"
+        },
+        "my_twitter_handles": {
+          "type": "text",
+          "fields": {
+            "keyword" :{
+              "type": "keyword"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------
+// CONSOLE
+
+Applications would then typically provide content and discover it as follows:
+
+[source,js]
+--------------------------
+# Example documents
+PUT my_index/_doc/1
+{
+  "my_unstructured_text_field": "[Shay](%40kimchy) created elasticsearch",
+  "my_twitter_handles": ["@kimchy"] <1>
+}
+
+GET my_index/_search
+{
+  "query": {
+    "query_string": {
+        "query": "elasticsearch OR logstash OR kibana",<2>
+        "default_field": "my_unstructured_text_field"
+    }
+  },
+  "aggregations": {
+    "top_people" :{
+        "significant_terms" : { <3>
+           "field" : "my_twitter_handles.keyword"
+        }
+    }
+  }
+}
+--------------------------
+// CONSOLE
+
+<1> Note the `my_twitter_handles` field contains a list of the annotation values
+also used in the unstructured text. (Note the annotated_text syntax requires escaping).
+By repeating the annotation values in a structured field this application has ensured that
+the tokens discovered in the structured field can be used for search and highlighting
+in the unstructured field.
+<2> In this example we search for documents that talk about components of the elastic stack.
+<3> We use the `my_twitter_handles` field here to discover people who are significantly
+associated with the elastic stack.
+
+===== Avoiding over-matching annotations
+By design, the regular text tokens and the annotation tokens co-exist in the same indexed
+field but in rare cases this can lead to some over-matching.
+
+The value of an annotation often denotes a _named entity_ (a person, place or company).
+The tokens for these named entities are inserted untokenized, and differ from typical text
+tokens because they are normally:
+
+* Mixed case e.g. `Madonna`
+* Multiple words e.g. `Jeff Beck`
+* Can have punctuation or numbers e.g. `Apple Inc.` or `@kimchy`
+
+This means, for the most part, a search for a named entity in the annotated text field will
+not have any false positives e.g. when selecting `Apple Inc.` from an aggregation result
+you can drill down to highlight uses in the text without "over matching" on any text tokens
+like the word `apple` in this context:
+
+    the apple was very juicy
+
+However, a problem arises if your named entity happens to be a single term and lower-case e.g. the
+company `elastic`. In this case, a search on the annotated text field for the token `elastic`
+may match a text document such as this:
+
+    he fired an elastic band
+
+To avoid such false matches users should consider prefixing annotation values to ensure
+they don't name clash with text tokens e.g.
+
+    [elastic](Company_elastic) released version 7.0 of the elastic stack today
+
+
+
+
+[[mapper-annotated-text-highlighter]]
+==== Using the `annotated` highlighter
+
+The `annotated-text` plugin includes a custom highlighter designed to mark up search hits
+in a way which is respectful of the original markup:
+
+[source,js]
+--------------------------
+# Example documents
+PUT my_index/_doc/1
+{
+  "my_field": "The cat sat on the [mat](sku3578)"
+}
+
+GET my_index/_search
+{
+  "query": {
+    "query_string": {
+        "query": "cats"
+    }
+  },
+  "highlight": {
+    "fields": {
+      "my_field": {
+        "type": "annotated", <1>
+        "require_field_match": false
+      }
+    }
+  }
+}
+--------------------------
+// CONSOLE
+<1> The `annotated` highlighter type is designed for use with annotated_text fields.
+
+The annotated highlighter is based on the `unified` highlighter and supports the same
+settings but does not use the `pre_tags` or `post_tags` parameters. Rather than using
+html-like markup such as `<em>cat</em>` the annotated highlighter uses the same
+markdown-like syntax used for annotations and injects a key=value annotation where `_hit_term`
+is the key and the matched search term is the value e.g.
+
+    The [cat](_hit_term=cat) sat on the [mat](sku3578)
+
+The annotated highlighter tries to be respectful of any existing markup in the original
+text:
+
+* If the search term matches exactly the location of an existing annotation then the
+`_hit_term` key is merged into the url-like syntax used in the `(...)` part of the
+existing annotation.
+* However, if the search term overlaps the span of an existing annotation it would break
+the markup formatting, so the original annotation is removed in favour of a new annotation
+with just the search hit information in the results.
+* Any non-overlapping annotations in the original text are preserved in highlighter
+selections.
+
+
+[[mapper-annotated-text-limitations]]
+==== Limitations
+
+The annotated_text field type supports the same mapping settings as the `text` field type
+but with the following exceptions:
+
+* No support for `fielddata` or `fielddata_frequency_filter`
+* No support for `index_prefixes` or `index_phrases` indexing
diff --git a/docs/plugins/mapper.asciidoc b/docs/plugins/mapper.asciidoc
index 226fc4e40d000..4026a45c59e70 100644
--- a/docs/plugins/mapper.asciidoc
+++ b/docs/plugins/mapper.asciidoc
@@ -19,5 +19,13 @@ indexes the size in bytes of the original
 The mapper-murmur3 plugin allows hashes to be computed at index-time and stored
 in the index for later use with the `cardinality` aggregation.
 
+<<mapper-annotated-text>>::
+
+The annotated text plugin provides the ability to index text that is a
+combination of free-text and special markup that is typically used to identify
+items of interest such as people or organisations (see NER or Named Entity Recognition
+tools).
+
 include::mapper-size.asciidoc[]
 include::mapper-murmur3.asciidoc[]
+include::mapper-annotated-text.asciidoc[]
diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc
index a9915d7aaa236..9cb8332183590 100644
--- a/docs/reference/cat/plugins.asciidoc
+++ b/docs/reference/cat/plugins.asciidoc
@@ -28,6 +28,7 @@ U7321H6 discovery-gce {version} The Google Compute Engine (GCE) Discov
 U7321H6 ingest-attachment {version} Ingest processor that uses Apache Tika to extract contents
 U7321H6 ingest-geoip {version} Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database
 U7321H6 ingest-user-agent {version} Ingest processor that extracts information from a user agent
+U7321H6 mapper-annotated-text {version} The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.
 U7321H6 mapper-murmur3 {version} The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index.
 U7321H6 mapper-size {version} The Mapper Size plugin allows document to record their uncompressed size at index time.
 U7321H6 store-smb {version} The Store SMB plugin adds support for SMB stores.
diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc
index fbd8181d0959a..9cd55bee8553b 100644
--- a/docs/reference/mapping/types.asciidoc
+++ b/docs/reference/mapping/types.asciidoc
@@ -35,6 +35,7 @@ string:: <> and <>
 `completion` to provide auto-complete suggestions
 <>:: `token_count` to count the number of tokens in a string
 {plugins}/mapper-murmur3.html[`mapper-murmur3`]:: `murmur3` to compute hashes of values at index-time and store them in the index
+{plugins}/mapper-annotated-text.html[`mapper-annotated-text`]:: `annotated-text` to index text containing special markup (typically used for identifying named entities)
 
 <>:: Accepts queries from the query-dsl
diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle
new file mode 100644
index 0000000000000..8ce1ca2a416fe
--- /dev/null
+++ b/plugins/mapper-annotated-text/build.gradle
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+esplugin {
+  description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.'
+ classname 'org.elasticsearch.plugin.mapper.AnnotatedTextPlugin' +} diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java new file mode 100644 index 0000000000000..8cc38d130ff52 --- /dev/null +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -0,0 +1,776 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.annotatedtext; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.Analyzer.TokenStreamComponents; +import org.apache.lucene.analysis.AnalyzerWrapper; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; +import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; +import org.apache.lucene.analysis.tokenattributes.TypeAttribute; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.StringFieldType; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken; +import 
org.elasticsearch.index.query.QueryShardContext;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.UncheckedIOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
+
+/** A {@link FieldMapper} for full-text fields with annotation markup e.g.
+ *
+ * "New mayor is [John Smith](type=person&value=John%20Smith) "
+ *
+ * A special Analyzer wraps the default choice of analyzer in order
+ * to strip the text field of annotation markup and inject the related
+ * entity annotation tokens as supplementary tokens at the relevant points
+ * in the token stream.
+ * This code is largely a copy of TextFieldMapper which is less than ideal -
+ * my attempts to subclass TextFieldMapper failed but we can revisit this.
+ **/
+public class AnnotatedTextFieldMapper extends FieldMapper {
+
+    public static final String CONTENT_TYPE = "annotated_text";
+    private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;
+
+    public static class Defaults {
+        public static final MappedFieldType FIELD_TYPE = new AnnotatedTextFieldType();
+        static {
+            FIELD_TYPE.freeze();
+        }
+    }
+
+    public static class Builder extends FieldMapper.Builder<Builder, AnnotatedTextFieldMapper> {
+
+        private int positionIncrementGap = POSITION_INCREMENT_GAP_USE_ANALYZER;
+
+        public Builder(String name) {
+            super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
+            builder = this;
+        }
+
+        @Override
+        public AnnotatedTextFieldType fieldType() {
+            return (AnnotatedTextFieldType) super.fieldType();
+        }
+
+        public Builder positionIncrementGap(int positionIncrementGap) {
+            if (positionIncrementGap < 0) {
+                throw new MapperParsingException("[positions_increment_gap] must be positive, got " + positionIncrementGap);
+            }
+            this.positionIncrementGap = positionIncrementGap;
+            return this;
+        }
+
+        @Override
+        public Builder docValues(boolean docValues) {
+            if (docValues) {
+                throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields do not support doc values");
+            }
+            return super.docValues(docValues);
+        }
+
+        @Override
+        public AnnotatedTextFieldMapper build(BuilderContext context) {
+            if (fieldType().indexOptions() == IndexOptions.NONE ) {
+                throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields must be indexed");
+            }
+            if (positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) {
+                if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+                    throw new IllegalArgumentException("Cannot set position_increment_gap on field ["
+                        + name + "] without positions enabled");
+                }
+                fieldType.setIndexAnalyzer(new NamedAnalyzer(fieldType.indexAnalyzer(), positionIncrementGap));
+                fieldType.setSearchAnalyzer(new NamedAnalyzer(fieldType.searchAnalyzer(), positionIncrementGap));
+                fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap));
+            } else {
+                //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory
+                // does to splice in new default of posIncGap=100 by wrapping the analyzer
+                if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
+                    int overrideInc = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
+                    fieldType.setIndexAnalyzer(new NamedAnalyzer(fieldType.indexAnalyzer(), overrideInc));
+                    fieldType.setSearchAnalyzer(new NamedAnalyzer(fieldType.searchAnalyzer(), overrideInc));
+                    fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(),overrideInc));
+                }
+            }
+            setupFieldType(context);
+            return new AnnotatedTextFieldMapper(
+                    name, fieldType(), defaultFieldType, positionIncrementGap,
+                    context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+        }
+    }
+
+    public static class TypeParser implements Mapper.TypeParser {
+        @Override
+        public Mapper.Builder parse(
+                String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+            AnnotatedTextFieldMapper.Builder builder = new AnnotatedTextFieldMapper.Builder(fieldName);
+
+            builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
+            builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
+            builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
+            parseTextField(builder, fieldName, node, parserContext);
+            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
+                Map.Entry<String, Object> entry = iterator.next();
+                String propName = entry.getKey();
+                Object propNode = entry.getValue();
+                if (propName.equals("position_increment_gap")) {
+                    int newPositionIncrementGap = XContentMapValues.nodeIntegerValue(propNode, -1);
+                    builder.positionIncrementGap(newPositionIncrementGap);
+                    iterator.remove();
+                }
+            }
+            return builder;
+        }
+    }
+
+
+    /**
+     * Parses markdown-like syntax into plain text and AnnotationTokens with offsets for
+     * annotations found in texts
+     */
+    public static final class AnnotatedText {
+        public final String textPlusMarkup;
+        public final String textMinusMarkup;
+        List<AnnotationToken> annotations;
+
+        // Format is markdown-like syntax for URLs eg:
+        // "New mayor is [John Smith](type=person&value=John%20Smith) "
+        static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)");
+
+        public static AnnotatedText parse (String textPlusMarkup) {
+            List<AnnotationToken> annotations = new ArrayList<>();
+            Matcher m = markdownPattern.matcher(textPlusMarkup);
+            int lastPos = 0;
+            StringBuilder sb = new StringBuilder();
+            while(m.find()){
+                if(m.start() > lastPos){
+                    sb.append(textPlusMarkup.substring(lastPos, m.start()));
+                }
+
+                int startOffset = sb.length();
+                int endOffset = sb.length() + m.group(1).length();
+                sb.append(m.group(1));
+                lastPos = m.end();
+
+                String[] pairs = m.group(2).split("&");
+                String value = null;
+                for (String pair : pairs) {
+                    String[] kv = pair.split("=");
+                    try {
+                        if(kv.length == 2){
+                            throw new ElasticsearchParseException("key=value pairs are not supported in annotations");
+                        }
+                        if(kv.length == 1) {
+                            //Check "=" sign wasn't in the pair string
+                            if(kv[0].length() == pair.length()) {
+                                //untyped value
+                                value = URLDecoder.decode(kv[0], "UTF-8");
+                            }
+                        }
+                        if (value!=null && value.length() > 0) {
+                            annotations.add(new AnnotationToken(startOffset, endOffset, value));
+                        }
+                    } catch (UnsupportedEncodingException uee){
+                        throw new ElasticsearchParseException("Unsupported encoding parsing annotated text", uee);
+                    }
+                }
+            }
+            if(lastPos < textPlusMarkup.length()){
+                sb.append(textPlusMarkup.substring(lastPos));
+            }
+            return new AnnotatedText(sb.toString(), textPlusMarkup, annotations);
+        }
+
+        protected AnnotatedText(String textMinusMarkup, String textPlusMarkup, List<AnnotationToken> annotations) {
+            this.textMinusMarkup = textMinusMarkup;
+            this.textPlusMarkup =
textPlusMarkup; + this.annotations = annotations; + } + + public static final class AnnotationToken { + public final int offset; + public final int endOffset; + + public final String value; + public AnnotationToken(int offset, int endOffset, String value) { + this.offset = offset; + this.endOffset = endOffset; + this.value = value; + } + @Override + public String toString() { + return value +" ("+offset+" - "+endOffset+")"; + } + + public boolean intersects(int start, int end) { + return (start <= offset && end >= offset) || (start <= endOffset && end >= endOffset) + || (start >= offset && end <= endOffset); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + endOffset; + result = prime * result + offset; + result = prime * result + Objects.hashCode(value); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + AnnotationToken other = (AnnotationToken) obj; + return Objects.equals(endOffset, other.endOffset) && Objects.equals(offset, other.offset) + && Objects.equals(value, other.value); + } + + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(textMinusMarkup); + sb.append("\n"); + annotations.forEach(a -> {sb.append(a); sb.append("\n");}); + return sb.toString(); + } + + public int numAnnotations() { + return annotations.size(); + } + + public AnnotationToken getAnnotation(int index) { + return annotations.get(index); + } + } + + // A utility class for use with highlighters where the content being highlighted + // needs plain text format for highlighting but marked-up format for token discovery. + // The class takes markedup format field values and returns plain text versions. + // When asked to tokenize plain-text versions by the highlighter it tokenizes the + // original markup form in order to inject annotations. 
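+    //
+    // Usage sketch (using only members defined on this class): wrap the field's
+    // analyzer, prime it with the stored marked-up values, then hand the plain-text
+    // versions to the highlighter:
+    //
+    //   AnnotatedHighlighterAnalyzer aha = new AnnotatedHighlighterAnalyzer(fieldAnalyzer);
+    //   aha.init(new String[] { "[Beck](Beck) announced a new tour" });
+    //   aha.getPlainTextValuesForHighlighter(); // -> { "Beck announced a new tour" }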
+ public static final class AnnotatedHighlighterAnalyzer extends AnalyzerWrapper { + private Analyzer delegate; + private AnnotatedText[] annotations; + public AnnotatedHighlighterAnalyzer(Analyzer delegate){ + super(delegate.getReuseStrategy()); + this.delegate = delegate; + } + + public void init(String[] markedUpFieldValues) { + this.annotations = new AnnotatedText[markedUpFieldValues.length]; + for (int i = 0; i < markedUpFieldValues.length; i++) { + annotations[i] = AnnotatedText.parse(markedUpFieldValues[i]); + } + } + + public String [] getPlainTextValuesForHighlighter(){ + String [] result = new String[annotations.length]; + for (int i = 0; i < annotations.length; i++) { + result[i] = annotations[i].textMinusMarkup; + } + return result; + } + + public AnnotationToken[] getIntersectingAnnotations(int start, int end) { + List intersectingAnnotations = new ArrayList<>(); + int fieldValueOffset =0; + for (AnnotatedText fieldValueAnnotations : this.annotations) { + //This is called from a highlighter where all of the field values are concatenated + // so each annotation offset will need to be adjusted so that it takes into account + // the previous values AND the MULTIVAL delimiter + for (AnnotationToken token : fieldValueAnnotations.annotations) { + if(token.intersects(start - fieldValueOffset , end - fieldValueOffset)) { + intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, + token.endOffset + fieldValueOffset, token.value)); + } + } + //add 1 for the fieldvalue separator character + fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1; + } + return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); + } + + @Override + public Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; + } + + @Override + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + if(components instanceof AnnotatedHighlighterTokenStreamComponents){ + // already wrapped. + return components; + } + AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); + return new AnnotatedHighlighterTokenStreamComponents(components.getTokenizer(), injector, this.annotations); + } + } + private static final class AnnotatedHighlighterTokenStreamComponents extends TokenStreamComponents{ + + private AnnotationsInjector annotationsInjector; + private AnnotatedText[] annotations; + int readerNum = 0; + + AnnotatedHighlighterTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsFilter, + AnnotatedText[] annotations) { + super(source, annotationsFilter); + this.annotationsInjector = annotationsFilter; + this.annotations = annotations; + } + + @Override + protected void setReader(Reader reader) { + String plainText = readToString(reader); + AnnotatedText at = this.annotations[readerNum++]; + assert at.textMinusMarkup.equals(plainText); + // This code is reliant on the behaviour of highlighter logic - it + // takes plain text multi-value fields and then calls the same analyzer + // for each field value in turn. 
This class has cached the annotations + // associated with each plain-text value and are arranged in the same order + annotationsInjector.setAnnotations(at); + super.setReader(new StringReader(at.textMinusMarkup)); + } + + } + + + public static final class AnnotationAnalyzerWrapper extends AnalyzerWrapper { + + + private final Analyzer delegate; + + public AnnotationAnalyzerWrapper (Analyzer delegate) { + super(delegate.getReuseStrategy()); + this.delegate = delegate; + } + + /** + * Wraps {@link StandardAnalyzer}. + */ + public AnnotationAnalyzerWrapper() { + this(new StandardAnalyzer()); + } + + + @Override + public Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; + } + + @Override + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + if(components instanceof AnnotatedTokenStreamComponents){ + // already wrapped. + return components; + } + AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); + return new AnnotatedTokenStreamComponents(components.getTokenizer(), injector); + } + } + + + //This Analyzer is not "wrappable" because of a limitation in Lucene https://issues.apache.org/jira/browse/LUCENE-8352 + private static final class AnnotatedTokenStreamComponents extends TokenStreamComponents{ + private AnnotationsInjector annotationsInjector; + + AnnotatedTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsInjector) { + super(source, annotationsInjector); + this.annotationsInjector = annotationsInjector; + } + + @Override + protected void setReader(Reader reader) { + // Sneaky code to change the content downstream components will parse. + // Replace the marked-up content Reader with a plain text Reader and prime the + // annotations injector with the AnnotatedTokens that need to be injected + // as plain-text parsing progresses. 
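+            // e.g. a reader over "[Beck](Beck) announced a new tour" is replaced by a
+            // reader over "Beck announced a new tour", with one AnnotationToken("Beck",
+            // offsets 0-4) queued for injection.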
+ AnnotatedText annotations = AnnotatedText.parse(readToString(reader)); + annotationsInjector.setAnnotations(annotations); + super.setReader(new StringReader(annotations.textMinusMarkup)); + } + } + + static String readToString(Reader reader) { + char[] arr = new char[8 * 1024]; + StringBuilder buffer = new StringBuilder(); + int numCharsRead; + try { + while ((numCharsRead = reader.read(arr, 0, arr.length)) != -1) { + buffer.append(arr, 0, numCharsRead); + } + reader.close(); + return buffer.toString(); + } catch (IOException e) { + throw new UncheckedIOException("IO Error reading field content", e); + } + } + + + public static final class AnnotationsInjector extends TokenFilter { + + private AnnotatedText annotatedText; + AnnotatedText.AnnotationToken nextAnnotationForInjection = null; + private int currentAnnotationIndex = 0; + List pendingStates = new ArrayList<>(); + int pendingStatePos = 0; + boolean inputExhausted = false; + + private final OffsetAttribute textOffsetAtt = addAttribute(OffsetAttribute.class); + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class); + private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class); + private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); + + public AnnotationsInjector(TokenStream in) { + super(in); + } + + public void setAnnotations(AnnotatedText annotatedText) { + this.annotatedText = annotatedText; + currentAnnotationIndex = 0; + if(annotatedText!=null && annotatedText.numAnnotations()>0){ + nextAnnotationForInjection = annotatedText.getAnnotation(0); + } else { + nextAnnotationForInjection = null; + } + } + + + + @Override + public void reset() throws IOException { + pendingStates.clear(); + pendingStatePos = 0; + inputExhausted = false; + super.reset(); + } + + // Abstracts if we are pulling from some pre-cached buffer of + // text tokens or directly from the wrapped TokenStream + private boolean internalNextToken() throws IOException{ + if (pendingStatePos < pendingStates.size()){ + restoreState(pendingStates.get(pendingStatePos)); + pendingStatePos ++; + if(pendingStatePos >=pendingStates.size()){ + pendingStatePos =0; + pendingStates.clear(); + } + return true; + } + if(inputExhausted) { + return false; + } + return input.incrementToken(); + } + + @Override + public boolean incrementToken() throws IOException { + if (internalNextToken()) { + if (nextAnnotationForInjection != null) { + // If we are at the right point to inject an annotation.... + if (textOffsetAtt.startOffset() >= nextAnnotationForInjection.offset) { + int firstSpannedTextPosInc = posAtt.getPositionIncrement(); + int annotationPosLen = 1; + + // Capture the text token's state for later replay - but + // with a zero pos increment so is same as annotation + // that is injected before it + posAtt.setPositionIncrement(0); + pendingStates.add(captureState()); + + while (textOffsetAtt.endOffset() <= nextAnnotationForInjection.endOffset) { + // Buffer up all the other tokens spanned by this annotation to determine length. 
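+                        // (these buffered tokens are replayed after the annotation token is
+                        // emitted; the annotation's positionLength is set to span them all in
+                        // emitAnnotation below, so positional queries still line up)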
+ if (input.incrementToken()) { + if (textOffsetAtt.endOffset() <= nextAnnotationForInjection.endOffset + && textOffsetAtt.startOffset() < nextAnnotationForInjection.endOffset) { + annotationPosLen += posAtt.getPositionIncrement(); + } + pendingStates.add(captureState()); + } else { + inputExhausted = true; + break; + } + } + emitAnnotation(firstSpannedTextPosInc, annotationPosLen); + return true; + } + } + return true; + } else { + inputExhausted = true; + return false; + } + } + private void setType(AnnotationToken token) { + //Default annotation type - in future AnnotationTokens may contain custom type info + typeAtt.setType("annotation"); + } + + private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) throws IOException { + // Set the annotation's attributes + posLenAtt.setPositionLength(annotationPosLen); + textOffsetAtt.setOffset(nextAnnotationForInjection.offset, nextAnnotationForInjection.endOffset); + setType(nextAnnotationForInjection); + + // We may have multiple annotations at this location - stack them up + final int annotationOffset = nextAnnotationForInjection.offset; + final AnnotatedText.AnnotationToken firstAnnotationAtThisPos = nextAnnotationForInjection; + while (nextAnnotationForInjection != null && nextAnnotationForInjection.offset == annotationOffset) { + + + setType(nextAnnotationForInjection); + termAtt.resizeBuffer(nextAnnotationForInjection.value.length()); + termAtt.copyBuffer(nextAnnotationForInjection.value.toCharArray(), 0, nextAnnotationForInjection.value.length()); + + if (nextAnnotationForInjection == firstAnnotationAtThisPos) { + posAtt.setPositionIncrement(firstSpannedTextPosInc); + //Put at the head of the queue of tokens to be emitted + pendingStates.add(0, captureState()); + } else { + posAtt.setPositionIncrement(0); + //Put after the head of the queue of tokens to be emitted + pendingStates.add(1, captureState()); + } + + + // Flag the inject annotation as null to prevent re-injection. 
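+                // (strictly: advance to the next annotation, which becomes null only once
+                // this text's annotations are exhausted)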
+ currentAnnotationIndex++; + if (currentAnnotationIndex < annotatedText.numAnnotations()) { + nextAnnotationForInjection = annotatedText.getAnnotation(currentAnnotationIndex); + } else { + nextAnnotationForInjection = null; + } + } + // Now pop the first of many potential buffered tokens: + internalNextToken(); + } + + } + + + public static final class AnnotatedTextFieldType extends StringFieldType { + + public AnnotatedTextFieldType() { + setTokenized(true); + } + + protected AnnotatedTextFieldType(AnnotatedTextFieldType ref) { + super(ref); + } + + @Override + public void setIndexAnalyzer(NamedAnalyzer delegate) { + if(delegate.analyzer() instanceof AnnotationAnalyzerWrapper){ + // Already wrapped the Analyzer with an AnnotationAnalyzer + super.setIndexAnalyzer(delegate); + } else { + // Wrap the analyzer with an AnnotationAnalyzer that will inject required annotations + super.setIndexAnalyzer(new NamedAnalyzer(delegate.name(), AnalyzerScope.INDEX, + new AnnotationAnalyzerWrapper(delegate.analyzer()))); + } + } + + public AnnotatedTextFieldType clone() { + return new AnnotatedTextFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + if (omitNorms()) { + return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); + } + } + + @Override + public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePosIncrements) throws IOException { + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + builder.setSlop(slop); + + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); + int position = -1; + + stream.reset(); + while (stream.incrementToken()) { + if (enablePosIncrements) { + position += posIncrAtt.getPositionIncrement(); + } + else { + position += 1; + } + builder.add(new Term(field, termAtt.getBytesRef()), position); + } + + return builder.build(); + } + + @Override + public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + + MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder(); + mpqb.setSlop(slop); + + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + + PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); + int position = -1; + + List multiTerms = new ArrayList<>(); + stream.reset(); + while (stream.incrementToken()) { + int positionIncrement = posIncrAtt.getPositionIncrement(); + + if (positionIncrement > 0 && multiTerms.size() > 0) { + if (enablePositionIncrements) { + mpqb.add(multiTerms.toArray(new Term[0]), position); + } else { + mpqb.add(multiTerms.toArray(new Term[0])); + } + multiTerms.clear(); + } + position += positionIncrement; + multiTerms.add(new Term(field, termAtt.getBytesRef())); + } + + if (enablePositionIncrements) { + mpqb.add(multiTerms.toArray(new Term[0]), position); + } else { + mpqb.add(multiTerms.toArray(new Term[0])); + } + return mpqb.build(); + } + } + + private int positionIncrementGap; + protected AnnotatedTextFieldMapper(String simpleName, AnnotatedTextFieldType fieldType, MappedFieldType defaultFieldType, + int positionIncrementGap, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + super(simpleName, fieldType, defaultFieldType, indexSettings, 
multiFields, copyTo); + assert fieldType.tokenized(); + assert fieldType.hasDocValues() == false; + this.positionIncrementGap = positionIncrementGap; + } + + @Override + protected AnnotatedTextFieldMapper clone() { + return (AnnotatedTextFieldMapper) super.clone(); + } + + public int getPositionIncrementGap() { + return this.positionIncrementGap; + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + final String value; + if (context.externalValueSet()) { + value = context.externalValue().toString(); + } else { + value = context.parser().textOrNull(); + } + + if (value == null) { + return; + } + + if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { + Field field = new Field(fieldType().name(), value, fieldType()); + fields.add(field); + if (fieldType().omitNorms()) { + createFieldNamesField(context, fields); + } + } + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + public AnnotatedTextFieldType fieldType() { + return (AnnotatedTextFieldType) super.fieldType(); + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + doXContentAnalyzers(builder, includeDefaults); + + if (includeDefaults || positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) { + builder.field("position_increment_gap", positionIncrementGap); + } + } +} diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/plugin/mapper/AnnotatedTextPlugin.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/plugin/mapper/AnnotatedTextPlugin.java new file mode 100644 index 0000000000000..c7abe5fb5f91e --- /dev/null +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/plugin/mapper/AnnotatedTextPlugin.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugin.mapper; + +import java.util.Collections; +import java.util.Map; + +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.fetch.subphase.highlight.AnnotatedTextHighlighter; +import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; + +public class AnnotatedTextPlugin extends Plugin implements MapperPlugin, SearchPlugin { + + @Override + public Map getMappers() { + return Collections.singletonMap(AnnotatedTextFieldMapper.CONTENT_TYPE, new AnnotatedTextFieldMapper.TypeParser()); + } + + @Override + public Map getHighlighters() { + return Collections.singletonMap(AnnotatedTextHighlighter.NAME, new AnnotatedTextHighlighter()); + } +} diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java new file mode 100644 index 0000000000000..ad1acc85031dd --- /dev/null +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java @@ -0,0 +1,201 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.fetch.subphase.highlight; + +import org.apache.lucene.search.highlight.Encoder; +import org.apache.lucene.search.uhighlight.Passage; +import org.apache.lucene.search.uhighlight.PassageFormatter; +import org.apache.lucene.search.uhighlight.Snippet; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken; + +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +/** + * Custom passage formatter that : + * 1) marks up search hits in markdown-like syntax for URLs ({@link Snippet}) + * 2) injects any annotations from the original text that don't conflict with search hit highlighting + */ +public class AnnotatedPassageFormatter extends PassageFormatter { + + + public static final String SEARCH_HIT_TYPE = "_hit_term"; + private final Encoder encoder; + private AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer; + + public AnnotatedPassageFormatter(AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer, Encoder encoder) { + this.annotatedHighlighterAnalyzer = annotatedHighlighterAnalyzer; + this.encoder = encoder; + } + + static class MarkupPassage { + List markups = new ArrayList<>(); + int lastMarkupEnd = -1; + + public void addUnlessOverlapping(Markup newMarkup) { + + // Fast exit. + if(newMarkup.start > lastMarkupEnd) { + markups.add(newMarkup); + lastMarkupEnd = newMarkup.end; + return; + } + + // Check to see if this new markup overlaps with any prior + int index=0; + for (Markup existingMarkup: markups) { + if(existingMarkup.samePosition(newMarkup)) { + existingMarkup.merge(newMarkup); + return; + } + if(existingMarkup.overlaps(newMarkup)) { + // existing markup wins - we throw away the new markup that would span this position + return; + } + // markup list is in start offset order so we can insert at this position then shift others right + if(existingMarkup.isAfter(newMarkup)) { + markups.add(index, newMarkup); + return; + } + index++; + } + markups.add(newMarkup); + lastMarkupEnd = newMarkup.end; + } + + } + static class Markup { + int start; + int end; + String metadata; + Markup(int start, int end, String metadata) { + super(); + this.start = start; + this.end = end; + this.metadata = metadata; + } + boolean isAfter(Markup other) { + return start > other.end; + } + void merge(Markup newMarkup) { + // metadata is key1=value&key2=value&.... 
syntax used for urls + assert samePosition(newMarkup); + metadata += "&" + newMarkup.metadata; + } + boolean samePosition(Markup other) { + return this.start == other.start && this.end == other.end; + } + boolean overlaps(Markup other) { + return (start<=other.start && end >= other.start) + || (start <= other.end && end >=other.end) + || (start>=other.start && end<=other.end); + } + @Override + public String toString() { + return "Markup [start=" + start + ", end=" + end + ", metadata=" + metadata + "]"; + } + + + } + // Merge original annotations and search hits into a single set of markups for each passage + static MarkupPassage mergeAnnotations(AnnotationToken [] annotations, Passage passage){ + try { + MarkupPassage markupPassage = new MarkupPassage(); + + // Add search hits first - they take precedence over any other markup + for (int i = 0; i < passage.getNumMatches(); i++) { + int start = passage.getMatchStarts()[i]; + int end = passage.getMatchEnds()[i]; + String searchTerm = passage.getMatchTerms()[i].utf8ToString(); + Markup markup = new Markup(start, end, SEARCH_HIT_TYPE+"="+URLEncoder.encode(searchTerm, StandardCharsets.UTF_8.name())); + markupPassage.addUnlessOverlapping(markup); + } + + // Now add original text's annotations - ignoring any that might conflict with the search hits markup. + for (AnnotationToken token: annotations) { + int start = token.offset; + int end = token.endOffset; + if(start >= passage.getStartOffset() && end<=passage.getEndOffset()) { + String escapedValue = URLEncoder.encode(token.value, StandardCharsets.UTF_8.name()); + Markup markup = new Markup(start, end, escapedValue); + markupPassage.addUnlessOverlapping(markup); + } + } + return markupPassage; + + } catch (UnsupportedEncodingException e) { + // We should always have UTF-8 support + throw new IllegalStateException(e); + } + } + + + @Override + public Snippet[] format(Passage[] passages, String content) { + Snippet[] snippets = new Snippet[passages.length]; + + int pos; + int j = 0; + for (Passage passage : passages) { + AnnotationToken [] annotations = annotatedHighlighterAnalyzer.getIntersectingAnnotations(passage.getStartOffset(), + passage.getEndOffset()); + MarkupPassage mergedMarkup = mergeAnnotations(annotations, passage); + + StringBuilder sb = new StringBuilder(); + pos = passage.getStartOffset(); + for(Markup markup: mergedMarkup.markups) { + int start = markup.start; + int end = markup.end; + // its possible to have overlapping terms + if (start > pos) { + append(sb, content, pos, start); + } + if (end > pos) { + sb.append("["); + append(sb, content, Math.max(pos, start), end); + + sb.append("]("); + sb.append(markup.metadata); + sb.append(")"); + pos = end; + } + } + // its possible a "term" from the analyzer could span a sentence boundary. 
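+            // (hence the Math.max below: pos may already sit at or beyond the passage end,
+            // and substring(start, end) with start > end would throw)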
+ append(sb, content, pos, Math.max(pos, passage.getEndOffset())); + //we remove the paragraph separator if present at the end of the snippet (we used it as separator between values) + if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) { + sb.deleteCharAt(sb.length() - 1); + } else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) { + sb.deleteCharAt(sb.length() - 1); + } + //and we trim the snippets too + snippets[j++] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0); + } + return snippets; + } + + private void append(StringBuilder dest, String content, int start, int end) { + dest.append(encoder.encodeText(content.substring(start, end))); + } +} diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java new file mode 100644 index 0000000000000..d93316c78921a --- /dev/null +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.fetch.subphase.highlight; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.search.highlight.Encoder; +import org.apache.lucene.search.uhighlight.PassageFormatter; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; +import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; +import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.Field; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +public class AnnotatedTextHighlighter extends UnifiedHighlighter { + + public static final String NAME = "annotated"; + + AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer = null; + + @Override + protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) { + annotatedHighlighterAnalyzer = new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type)); + return annotatedHighlighterAnalyzer; + } + + // Convert the marked-up values held on-disk to plain-text versions for highlighting + @Override + protected List loadFieldValues(MappedFieldType fieldType, Field field, SearchContext context, HitContext hitContext) + throws IOException { + List fieldValues = super.loadFieldValues(fieldType, field, context, hitContext); + String[] fieldValuesAsString = fieldValues.toArray(new String[fieldValues.size()]); + annotatedHighlighterAnalyzer.init(fieldValuesAsString); + return Arrays.asList((Object[]) annotatedHighlighterAnalyzer.getPlainTextValuesForHighlighter()); + } + + @Override + protected PassageFormatter getPassageFormatter(SearchContextHighlight.Field field, Encoder encoder) { + return new AnnotatedPassageFormatter(annotatedHighlighterAnalyzer, encoder); + + } + +} diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..3d643b2a7ca41 --- /dev/null +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper.annotatedtext; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class AnnotatedTextClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public AnnotatedTextClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return createParameters(); + } +} + diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java new file mode 100644 index 0000000000000..8a51b9a494b16 --- /dev/null +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java @@ -0,0 +1,681 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.index.mapper.annotatedtext; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.termvectors.TermVectorsRequest; +import org.elasticsearch.action.termvectors.TermVectorsResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.termvectors.TermVectorsService; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugin.mapper.AnnotatedTextPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + Settings settings = Settings.builder() + .put("index.analysis.filter.mySynonyms.type", "synonym") + .putList("index.analysis.filter.mySynonyms.synonyms", Collections.singletonList("car, auto")) + .put("index.analysis.analyzer.synonym.tokenizer", "standard") + .put("index.analysis.analyzer.synonym.filter", "mySynonyms") + // Stop filter remains in server as it is part of lucene-core + .put("index.analysis.analyzer.my_stop_analyzer.tokenizer", "standard") + .put("index.analysis.analyzer.my_stop_analyzer.filter", "stop") + .build(); + indexService = createIndex("test", settings); + parser = indexService.mapperService().documentMapperParser(); + } + + + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + List<Class<? extends Plugin>> classpathPlugins = new ArrayList<>(); + classpathPlugins.add(AnnotatedTextPlugin.class); + return classpathPlugins; + } + + + + protected
String getFieldType() { + return "annotated_text"; + } + + public void testAnnotationInjection() throws IOException { + + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = indexService.mapperService().merge("type", + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + // Use example of typed and untyped annotations + String annotatedText = "He paid [Stormy Daniels](Stephanie+Clifford&Payee) hush money"; + SourceToParse sourceToParse = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", annotatedText) + .endObject()), + XContentType.JSON); + ParsedDocument doc = mapper.parse(sourceToParse); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + + assertEquals(annotatedText, fields[0].stringValue()); + + IndexShard shard = indexService.getShard(0); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + shard.refresh("test"); + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); + TermsEnum terms = leaf.terms("field").iterator(); + + assertTrue(terms.seekExact(new BytesRef("stormy"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(2, postings.nextPosition()); + + assertTrue(terms.seekExact(new BytesRef("Stephanie Clifford"))); + postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(2, postings.nextPosition()); + + assertTrue(terms.seekExact(new BytesRef("Payee"))); + postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(2, postings.nextPosition()); + + + assertTrue(terms.seekExact(new BytesRef("hush"))); + postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(4, postings.nextPosition()); + + } + } + + public void testToleranceForBadAnnotationMarkup() throws IOException { + + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = indexService.mapperService().merge("type", + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + String annotatedText = "foo [bar](MissingEndBracket baz"; + SourceToParse sourceToParse = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", annotatedText) + .endObject()), + XContentType.JSON); + ParsedDocument doc = mapper.parse(sourceToParse); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + + assertEquals(annotatedText, fields[0].stringValue()); + + IndexShard shard = indexService.getShard(0); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + shard.refresh("test"); + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + LeafReader leaf = 
searcher.getDirectoryReader().leaves().get(0).reader(); + TermsEnum terms = leaf.terms("field").iterator(); + + assertTrue(terms.seekExact(new BytesRef("foo"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(0, postings.nextPosition()); + + assertTrue(terms.seekExact(new BytesRef("bar"))); + postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(1, postings.nextPosition()); + + assertFalse(terms.seekExact(new BytesRef("MissingEndBracket"))); + // Bad markup means value is treated as plain text and fed through tokenisation + assertTrue(terms.seekExact(new BytesRef("missingendbracket"))); + + } + } + + public void testAgainstTermVectorsAPI() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("tvfield").field("type", getFieldType()) + .field("term_vector", "with_positions_offsets_payloads") + .endObject().endObject() + .endObject().endObject()); + indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + + int max = between(3, 10); + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < max; i++) { + bulk.add(client().prepareIndex("test", "type", Integer.toString(i)) + .setSource("tvfield", "the quick [brown](Color) fox jumped over the lazy dog")); + } + bulk.get(); + + TermVectorsRequest request = new TermVectorsRequest("test", "type", "0").termStatistics(true); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService(resolveIndex("test")); + IndexShard shard = test.getShardOrNull(0); + assertThat(shard, notNullValue()); + TermVectorsResponse response = TermVectorsService.getTermVectors(shard, request); + assertEquals(1, response.getFields().size()); + + Terms terms = response.getFields().terms("tvfield"); + TermsEnum iterator = terms.iterator(); + BytesRef term; + Set<String> foundTerms = new HashSet<>(); + while ((term = iterator.next()) != null) { + foundTerms.add(term.utf8ToString()); + } + //Check we have both text and annotation tokens + assertTrue(foundTerms.contains("brown")); + assertTrue(foundTerms.contains("Color")); + assertTrue(foundTerms.contains("fox")); + + } + + // ===== Code below copied from TextFieldMapperTests ======== + + public void testDefaults() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + + assertEquals("1234", fields[0].stringValue()); + IndexableFieldType fieldType = fields[0].fieldType(); + assertThat(fieldType.omitNorms(), equalTo(false)); + assertTrue(fieldType.tokenized()); + assertFalse(fieldType.stored()); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)); +
assertThat(fieldType.storeTermVectors(), equalTo(false)); + assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); + assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); + assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); + assertEquals(DocValuesType.NONE, fieldType.docValuesType()); + } + + public void testEnableStore() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", getFieldType()).field("store", true).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertTrue(fields[0].fieldType().stored()); + } + + public void testDisableNorms() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", getFieldType()) + .field("norms", false) + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertTrue(fields[0].fieldType().omitNorms()); + } + + public void testIndexOptions() throws IOException { + Map<String, IndexOptions> supportedOptions = new HashMap<>(); + supportedOptions.put("docs", IndexOptions.DOCS); + supportedOptions.put("freqs", IndexOptions.DOCS_AND_FREQS); + supportedOptions.put("positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + supportedOptions.put("offsets", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + + XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties"); + for (String option : supportedOptions.keySet()) { + mappingBuilder.startObject(option).field("type", getFieldType()).field("index_options", option).endObject(); + } + String mapping = Strings.toString(mappingBuilder.endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + XContentBuilder jsonDoc = XContentFactory.jsonBuilder().startObject(); + for (String option : supportedOptions.keySet()) { + jsonDoc.field(option, "1234"); + } + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(jsonDoc.endObject()), + XContentType.JSON)); + + for (Map.Entry<String, IndexOptions> entry : supportedOptions.entrySet()) { + String field = entry.getKey(); + IndexOptions options = entry.getValue(); + IndexableField[] fields = doc.rootDoc().getFields(field); + assertEquals(1, fields.length); + assertEquals(options, fields[0].fieldType().indexOptions()); + } + } + + public void testDefaultPositionIncrementGap() throws IOException { + String mapping =
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = indexService.mapperService().merge("type", + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + assertEquals(mapping, mapper.mappingSource().toString()); + + SourceToParse sourceToParse = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .array("field", new String[] {"a", "b"}) + .endObject()), + XContentType.JSON); + ParsedDocument doc = mapper.parse(sourceToParse); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + + assertEquals("a", fields[0].stringValue()); + assertEquals("b", fields[1].stringValue()); + + IndexShard shard = indexService.getShard(0); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + shard.refresh("test"); + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); + TermsEnum terms = leaf.terms("field").iterator(); + assertTrue(terms.seekExact(new BytesRef("b"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 1, postings.nextPosition()); + } + } + + public void testPositionIncrementGap() throws IOException { + final int positionIncrementGap = randomIntBetween(1, 1000); + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", getFieldType()) + .field("position_increment_gap", positionIncrementGap) + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = indexService.mapperService().merge("type", + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + assertEquals(mapping, mapper.mappingSource().toString()); + + SourceToParse sourceToParse = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .array("field", new String[]{"a", "b"}) + .endObject()), + XContentType.JSON); + ParsedDocument doc = mapper.parse(sourceToParse); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + + assertEquals("a", fields[0].stringValue()); + assertEquals("b", fields[1].stringValue()); + + IndexShard shard = indexService.getShard(0); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + shard.refresh("test"); + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); + TermsEnum terms = leaf.terms("field").iterator(); + assertTrue(terms.seekExact(new BytesRef("b"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(positionIncrementGap + 1, postings.nextPosition()); + } + } + + public void testSearchAnalyzerSerialization() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", 
getFieldType()) + .field("analyzer", "standard") + .field("search_analyzer", "keyword") + .endObject() + .endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + + // special case: default index analyzer + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", "default") + .field("search_analyzer", "keyword") + .endObject() + .endObject().endObject().endObject()); + + mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", "keyword") + .endObject() + .endObject().endObject().endObject()); + + mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + + // special case: default search analyzer + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", "keyword") + .field("search_analyzer", "default") + .endObject() + .endObject().endObject().endObject()); + + mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", "keyword") + .endObject() + .endObject().endObject().endObject()); + mapper = parser.parse("type", new CompressedXContent(mapping)); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"))); + builder.endObject(); + + String mappingString = Strings.toString(builder); + assertTrue(mappingString.contains("analyzer")); + assertTrue(mappingString.contains("search_analyzer")); + assertTrue(mappingString.contains("search_quote_analyzer")); + } + + public void testSearchQuoteAnalyzerSerialization() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", "standard") + .field("search_analyzer", "standard") + .field("search_quote_analyzer", "keyword") + .endObject() + .endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + + // special case: default index/search analyzer + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", "default") + .field("search_analyzer", "default") + .field("search_quote_analyzer", "keyword") + .endObject() + .endObject().endObject().endObject()); + + mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + } + + public void 
testTermVectors() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field1") + .field("type", getFieldType()) + .field("term_vector", "no") + .endObject() + .startObject("field2") + .field("type", getFieldType()) + .field("term_vector", "yes") + .endObject() + .startObject("field3") + .field("type", getFieldType()) + .field("term_vector", "with_offsets") + .endObject() + .startObject("field4") + .field("type", getFieldType()) + .field("term_vector", "with_positions") + .endObject() + .startObject("field5") + .field("type", getFieldType()) + .field("term_vector", "with_positions_offsets") + .endObject() + .startObject("field6") + .field("type", getFieldType()) + .field("term_vector", "with_positions_offsets_payloads") + .endObject() + .endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); + + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field1", "1234") + .field("field2", "1234") + .field("field3", "1234") + .field("field4", "1234") + .field("field5", "1234") + .field("field6", "1234") + .endObject()), + XContentType.JSON)); + + assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectors(), equalTo(false)); + assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorOffsets(), equalTo(false)); + assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPositions(), equalTo(false)); + assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPayloads(), equalTo(false)); + + assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectors(), equalTo(true)); + assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorOffsets(), equalTo(false)); + assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPositions(), equalTo(false)); + assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPayloads(), equalTo(false)); + + assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectors(), equalTo(true)); + assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorOffsets(), equalTo(true)); + assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPositions(), equalTo(false)); + assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPayloads(), equalTo(false)); + + assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectors(), equalTo(true)); + assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorOffsets(), equalTo(false)); + assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPositions(), equalTo(true)); + assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPayloads(), equalTo(false)); + + assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectors(), equalTo(true)); + assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorOffsets(), equalTo(true)); + assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPositions(), equalTo(true)); + assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPayloads(), equalTo(false)); + + assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectors(), equalTo(true)); + assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorOffsets(), equalTo(true)); 
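+ // field6 is mapped with "with_positions_offsets_payloads", so positions and payloads should be stored as well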
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPositions(), equalTo(true)); + assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true)); + } + + public void testNullConfigValuesFail() throws MapperParsingException, IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", getFieldType()) + .field("analyzer", (String) null) + .endObject() + .endObject() + .endObject().endObject()); + + Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("[analyzer] must not have a [null] value", e.getMessage()); + } + + public void testNotIndexedField() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", getFieldType()) + .field("index", false) + .endObject().endObject().endObject().endObject()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("[annotated_text] fields must be indexed", e.getMessage()); + } + + public void testAnalyzedFieldPositionIncrementWithoutPositions() throws IOException { + for (String indexOptions : Arrays.asList("docs", "freqs")) { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", getFieldType()) + .field("index_options", indexOptions) + .field("position_increment_gap", 10) + .endObject().endObject().endObject().endObject()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("Cannot set position_increment_gap on field [field] without positions enabled", e.getMessage()); + } + } + + public void testEmptyName() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("") + .field("type", getFieldType()) + .endObject() + .endObject() + .endObject().endObject()); + + // Empty name not allowed in index created after 5.0 + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping)) + ); + assertThat(e.getMessage(), containsString("name cannot be empty string")); + } + + + +} diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java new file mode 100644 index 0000000000000..4df44df5cd514 --- /dev/null +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.annotatedtext; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class AnnotatedTextParsingTests extends ESTestCase { + + private void checkParsing(String markup, String expectedPlainText, AnnotationToken... expectedTokens) { + AnnotatedText at = AnnotatedText.parse(markup); + assertEquals(expectedPlainText, at.textMinusMarkup); + List<AnnotationToken> actualAnnotations = at.annotations; + assertEquals(expectedTokens.length, actualAnnotations.size()); + for (int i = 0; i < expectedTokens.length; i++) { + assertEquals(expectedTokens[i], actualAnnotations.get(i)); + } + } + + public void testSingleValueMarkup() { + checkParsing("foo [bar](Y)", "foo bar", new AnnotationToken(4,7,"Y")); + } + + public void testMultiValueMarkup() { + checkParsing("foo [bar](Y&B)", "foo bar", new AnnotationToken(4,7,"Y"), + new AnnotationToken(4,7,"B")); + } + + public void testBlankTextAnnotation() { + checkParsing("It sounded like this:[](theSoundOfOneHandClapping)", "It sounded like this:", + new AnnotationToken(21,21,"theSoundOfOneHandClapping")); + } + + public void testMissingBracket() { + checkParsing("[foo](MissingEndBracket bar", + "[foo](MissingEndBracket bar", new AnnotationToken[0]); + } + + public void testAnnotationWithType() { + Exception expectedException = expectThrows(ElasticsearchParseException.class, + () -> checkParsing("foo [bar](type=foo) baz", "foo bar baz", new AnnotationToken(4,7, "noType"))); + assertThat(expectedException.getMessage(), equalTo("key=value pairs are not supported in annotations")); + } + + public void testMissingValue() { + checkParsing("[foo]() bar", "foo bar", new AnnotationToken[0]); + } + + +} diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java new file mode 100644 index 0000000000000..2fcf917ab1d79 --- /dev/null +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java @@ -0,0 +1,185 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.highlight; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.highlight.DefaultEncoder; +import org.apache.lucene.search.uhighlight.CustomSeparatorBreakIterator; +import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter; +import org.apache.lucene.search.uhighlight.PassageFormatter; +import org.apache.lucene.search.uhighlight.Snippet; +import org.apache.lucene.search.uhighlight.SplittingBreakIterator; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotationAnalyzerWrapper; +import org.elasticsearch.search.fetch.subphase.highlight.AnnotatedPassageFormatter; +import org.elasticsearch.test.ESTestCase; + +import java.net.URLEncoder; +import java.text.BreakIterator; +import java.util.Locale; + +import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; +import static org.hamcrest.CoreMatchers.equalTo; + +public class AnnotatedTextHighlighterTests extends ESTestCase { + + private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, + Query query, Locale locale, BreakIterator breakIterator, + int noMatchSize, String[] expectedPassages) throws Exception { + + // Annotated fields wrap the usual analyzer with one that injects extra tokens + Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer()); + AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer); + hiliteAnalyzer.init(markedUpInputs); + PassageFormatter passageFormatter = new AnnotatedPassageFormatter(hiliteAnalyzer,new DefaultEncoder()); + String []plainTextForHighlighter = hiliteAnalyzer.getPlainTextValuesForHighlighter(); + + + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer); + iwc.setMergePolicy(newTieredMergePolicy(random())); + RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); + FieldType ft = new FieldType(TextField.TYPE_STORED); + if (randomBoolean()) { + ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + } else { + 
ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + } + ft.freeze(); + Document doc = new Document(); + for (String input : markedUpInputs) { + Field field = new Field(fieldName, "", ft); + field.setStringValue(input); + doc.add(field); + } + iw.addDocument(doc); + DirectoryReader reader = iw.getReader(); + IndexSearcher searcher = newSearcher(reader); + iw.close(); + TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER); + assertThat(topDocs.totalHits.value, equalTo(1L)); + String rawValue = Strings.arrayToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR)); + + CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, hiliteAnalyzer, null, + passageFormatter, locale, + breakIterator, rawValue, noMatchSize); + highlighter.setFieldMatcher((name) -> "text".equals(name)); + final Snippet[] snippets = + highlighter.highlightField("text", query, topDocs.scoreDocs[0].doc, expectedPassages.length); + assertEquals(expectedPassages.length, snippets.length); + for (int i = 0; i < snippets.length; i++) { + assertEquals(expectedPassages[i], snippets[i].getText()); + } + reader.close(); + dir.close(); + } + + + public void testAnnotatedTextStructuredMatch() throws Exception { + // Check that a structured token eg a URL can be highlighted in a query + // on marked-up + // content using an "annotated_text" type field. + String url = "https://en.wikipedia.org/wiki/Key_Word_in_Context"; + String encodedUrl = URLEncoder.encode(url, "UTF-8"); + String annotatedWord = "[highlighting](" + encodedUrl + ")"; + String highlightedAnnotatedWord = "[highlighting](" + AnnotatedPassageFormatter.SEARCH_HIT_TYPE + "=" + encodedUrl + "&" + + encodedUrl + ")"; + final String[] markedUpInputs = { "This is a test. Just a test1 " + annotatedWord + " from [annotated](bar) highlighter.", + "This is the second " + annotatedWord + " value to perform highlighting on a longer text that gets scored lower." }; + + String[] expectedPassages = { + "This is a test. Just a test1 " + highlightedAnnotatedWord + " from [annotated](bar) highlighter.", + "This is the second " + highlightedAnnotatedWord + " value to perform highlighting on a" + + " longer text that gets scored lower." }; + Query query = new TermQuery(new Term("text", url)); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + + public void testAnnotatedTextOverlapsWithUnstructuredSearchTerms() throws Exception { + final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore", + "Donald duck is a [Disney](Disney+Inc) invention" }; + + String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore", + "[Donald](_hit_term=donald) duck is a [Disney](Disney+Inc) invention" }; + Query query = new TermQuery(new Term("text", "donald")); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + + public void testAnnotatedTextMultiFieldWithBreakIterator() throws Exception { + final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. 
Kim shook hands with Donald", + "Donald duck is a [Disney](Disney+Inc) invention" }; + String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore", + "Kim shook hands with [Donald](_hit_term=donald)", + "[Donald](_hit_term=donald) duck is a [Disney](Disney+Inc) invention" }; + Query query = new TermQuery(new Term("text", "donald")); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + breakIterator = new SplittingBreakIterator(breakIterator, '.'); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + + public void testAnnotatedTextSingleFieldWithBreakIterator() throws Exception { + final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. Kim shook hands with Donald"}; + String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore", + "Kim shook hands with [Donald](_hit_term=donald)"}; + Query query = new TermQuery(new Term("text", "donald")); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + breakIterator = new SplittingBreakIterator(breakIterator, '.'); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + + public void testAnnotatedTextSingleFieldWithPhraseQuery() throws Exception { + final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore", + "Donald Jr was with Melania Trump"}; + String[] expectedPassages = { "[Donald](_hit_term=donald) [Trump](_hit_term=trump) visited Singapore"}; + Query query = new PhraseQuery("text", "donald", "trump"); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + + public void testBadAnnotation() throws Exception { + final String[] markedUpInputs = { "Missing bracket for [Donald Trump](Donald+Trump visited Singapore"}; + String[] expectedPassages = { "Missing bracket for [Donald Trump](Donald+Trump visited [Singapore](_hit_term=singapore)"}; + Query query = new TermQuery(new Term("text", "singapore")); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + +} diff --git a/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml b/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml new file mode 100644 index 0000000000000..64e0b863bf976 --- /dev/null +++ b/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml @@ -0,0 +1,44 @@ +# Integration tests for Mapper Annotated_text components +# + +--- +"annotated highlighter on annotated text": + - skip: + version: " - 6.99.99" + reason: Annotated text type introduced in 7.0.0-alpha1 + + - do: + indices.create: + index: annotated + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + mappings: + doc: + properties: + text: + type: annotated_text + entityID: + type: keyword + + - do: + index: + index: annotated + type: doc + body: + "text" : "The [quick brown fox](entity_3789) is brown." 
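+          # entity_3789 is indexed both as an annotation inside the "text" field and as a standalone keyword, so either representation can be queried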
+ "entityID": "entity_3789" + refresh: true + + - do: + search: + body: { "query" : {"term" : { "entityID" : "entity_3789" } }, "highlight" : { "type" : "annotated", "require_field_match": false, "fields" : { "text" : {} } } } + + - match: {hits.hits.0.highlight.text.0: "The [quick brown fox](_hit_term=entity_3789&entity_3789) is brown."} + + - do: + search: + body: { "query" : {"term" : { "text" : "quick" } }, "highlight" : { "type" : "annotated", "require_field_match": false, "fields" : { "text" : {} } } } + + - match: {hits.hits.0.highlight.text.0: "The [quick](_hit_term=quick) brown fox is brown."} diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 8fd6bd9ad3f15..7aeb03851a5d9 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -266,6 +266,10 @@ fi install_and_check_plugin mapper murmur3 } +@test "[$GROUP] install annotated-text mapper plugin" { + install_and_check_plugin mapper annotated-text +} + @test "[$GROUP] check reindex module" { check_module reindex } @@ -380,6 +384,10 @@ fi remove_plugin mapper-murmur3 } +@test "[$GROUP] remove annotated-text mapper plugin" { + remove_plugin mapper-annotated-text +} + @test "[$GROUP] remove size mapper plugin" { remove_plugin mapper-size } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java index c1c42fb45a44a..6ae302ee87a25 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java @@ -18,10 +18,13 @@ */ package org.elasticsearch.search.fetch.subphase.highlight; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.highlight.SimpleHTMLEncoder; import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; @@ -70,8 +73,18 @@ public static List loadFieldValues(SearchContextHighlight.Field field, return textsToHighlight; } - static class Encoders { - static final Encoder DEFAULT = new DefaultEncoder(); - static final Encoder HTML = new SimpleHTMLEncoder(); + public static class Encoders { + public static final Encoder DEFAULT = new DefaultEncoder(); + public static final Encoder HTML = new SimpleHTMLEncoder(); } + + static Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) { + if (type instanceof KeywordFieldMapper.KeywordFieldType) { + KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) type; + if (keywordFieldType.normalizer() != null) { + return keywordFieldType.normalizer(); + } + } + return docMapper.mappers().indexAnalyzer(); + } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index 
1ac3f4789cb05..ec5071706b031 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -49,7 +49,6 @@ import java.util.Map; import static org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter.convertFieldValue; -import static org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter.getAnalyzer; public class PlainHighlighter implements Highlighter { private static final String CACHE_KEY = "highlight-plain"; @@ -102,7 +101,7 @@ public HighlightField highlight(HighlighterContext highlighterContext) { int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 1 : field.fieldOptions().numberOfFragments(); ArrayList<TextFragment> fragsList = new ArrayList<>(); List<Object> textsToHighlight; - Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType); + Analyzer analyzer = HighlightUtils.getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType); final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset(); try { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index 2c9d482cab0b2..123e18a4da618 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.uhighlight.CustomPassageFormatter; import org.apache.lucene.search.uhighlight.CustomSeparatorBreakIterator; import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter; +import org.apache.lucene.search.uhighlight.PassageFormatter; import org.apache.lucene.search.uhighlight.Snippet; import org.apache.lucene.search.uhighlight.UnifiedHighlighter.OffsetSource; import org.apache.lucene.util.BytesRef; @@ -34,7 +35,6 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -54,7 +54,7 @@ public class UnifiedHighlighter implements Highlighter { public boolean canHighlight(MappedFieldType fieldType) { return true; } - + @Override public HighlightField highlight(HighlighterContext highlighterContext) { MappedFieldType fieldType = highlighterContext.fieldType; @@ -62,23 +62,18 @@ public HighlightField highlight(HighlighterContext highlighterContext) { SearchContext context = highlighterContext.context; FetchSubPhase.HitContext hitContext = highlighterContext.hitContext; Encoder encoder = field.fieldOptions().encoder().equals("html") ?
HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT; - CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0], - field.fieldOptions().postTags()[0], encoder); final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset(); List<Snippet> snippets = new ArrayList<>(); int numberOfFragments; try { - final Analyzer analyzer = - getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType); - List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldType, context, hitContext); - fieldValues = fieldValues.stream() - .map((s) -> convertFieldValue(fieldType, s)) - .collect(Collectors.toList()); + final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType); + List<Object> fieldValues = loadFieldValues(fieldType, field, context, hitContext); if (fieldValues.size() == 0) { return null; } + final PassageFormatter passageFormatter = getPassageFormatter(field, encoder); final IndexSearcher searcher = new IndexSearcher(hitContext.reader()); final CustomUnifiedHighlighter highlighter; final String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR); @@ -145,7 +140,27 @@ public HighlightField highlight(HighlighterContext highlighterContext) { return null; } - private BreakIterator getBreakIterator(SearchContextHighlight.Field field) { + protected PassageFormatter getPassageFormatter(SearchContextHighlight.Field field, Encoder encoder) { + CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0], + field.fieldOptions().postTags()[0], encoder); + return passageFormatter; + } + + + protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) { + return HighlightUtils.getAnalyzer(docMapper, type); + } + + protected List<Object> loadFieldValues(MappedFieldType fieldType, SearchContextHighlight.Field field, SearchContext context, + FetchSubPhase.HitContext hitContext) throws IOException { + List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldType, context, hitContext); + fieldValues = fieldValues.stream() + .map((s) -> convertFieldValue(fieldType, s)) + .collect(Collectors.toList()); + return fieldValues; + } + + protected BreakIterator getBreakIterator(SearchContextHighlight.Field field) { final SearchContextHighlight.FieldOptions fieldOptions = field.fieldOptions(); final Locale locale = fieldOptions.boundaryScannerLocale() != null ?
fieldOptions.boundaryScannerLocale() : @@ -168,7 +183,7 @@ private BreakIterator getBreakIterator(SearchContextHighlight.Field field) { } } - private static List<Snippet> filterSnippets(List<Snippet> snippets, int numberOfFragments) { + protected static List<Snippet> filterSnippets(List<Snippet> snippets, int numberOfFragments) { //We need to filter the snippets as due to no_match_size we could have //either highlighted snippets or non highlighted ones and we don't want to mix those up @@ -203,17 +218,7 @@ private static List filterSnippets(List snippets, int numberOf return filteredSnippets; } - static Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) { - if (type instanceof KeywordFieldMapper.KeywordFieldType) { - KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) type; - if (keywordFieldType.normalizer() != null) { - return keywordFieldType.normalizer(); - } - } - return docMapper.mappers().indexAnalyzer(); - } - - static String convertFieldValue(MappedFieldType type, Object value) { + protected static String convertFieldValue(MappedFieldType type, Object value) { if (value instanceof BytesRef) { return type.valueForDisplay(value).toString(); } else { @@ -221,14 +226,14 @@ static String convertFieldValue(MappedFieldType type, Object value) { } } - private static String mergeFieldValues(List<Object> fieldValues, char valuesSeparator) { + protected static String mergeFieldValues(List<Object> fieldValues, char valuesSeparator) { //postings highlighter accepts all values in a single string, as offsets etc. need to match with content //loaded from stored fields, we merge all values using a proper separator String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(valuesSeparator)); return rawValue.substring(0, Math.min(rawValue.length(), Integer.MAX_VALUE - 1)); } - private OffsetSource getOffsetSource(MappedFieldType fieldType) { + protected OffsetSource getOffsetSource(MappedFieldType fieldType) { if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) { return fieldType.storeTermVectors() ?
OffsetSource.POSTINGS_WITH_TERM_VECTORS : OffsetSource.POSTINGS; } From 9543992d8e1c15215055942a15b670d7b0213d75 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 18 Sep 2018 11:51:11 +0100 Subject: [PATCH 14/46] HLRC: Get ML calendars (#33760) --- .../client/MLRequestConverters.java | 17 ++- .../client/MachineLearningClient.java | 40 +++++++ .../client/ml/GetCalendarsRequest.java | 104 ++++++++++++++++++ .../client/ml/GetCalendarsResponse.java | 86 +++++++++++++++ .../client/ml/job/results/AnomalyRecord.java | 16 +-- .../client/ml/job/results/Bucket.java | 15 +-- .../ml/job/results/BucketInfluencer.java | 16 +-- .../client/ml/job/results/Influencer.java | 16 +-- .../client/ml/job/results/OverallBucket.java | 16 +-- .../client/ml/job/results/Result.java | 1 - .../client/MLRequestConvertersTests.java | 25 ++++- .../client/MachineLearningIT.java | 27 ++++- .../MlClientDocumentationIT.java | 65 +++++++++++ .../client/ml/GetCalendarsRequestTests.java | 46 ++++++++ .../client/ml/GetCalendarsResponseTests.java | 52 +++++++++ .../high-level/ml/get-calendars.asciidoc | 83 ++++++++++++++ .../high-level/ml/put-calendar.asciidoc | 2 +- .../high-level/supported-apis.asciidoc | 2 + 18 files changed, 563 insertions(+), 66 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsResponse.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsResponseTests.java create mode 100644 docs/java-rest/high-level/ml/get-calendars.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 1a681822eca27..bc2ff7b17d57b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -34,6 +34,7 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetDatafeedRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; @@ -229,7 +230,7 @@ static Request deleteDatafeed(DeleteDatafeedRequest deleteDatafeedRequest) { return request; } - static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) throws IOException { + static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") .addPathPartAsIs("ml") @@ -305,7 +306,7 @@ static Request getRecords(GetRecordsRequest getRecordsRequest) throws IOExceptio return request; } - static Request postData(PostDataRequest postDataRequest) throws IOException { + static Request postData(PostDataRequest postDataRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") .addPathPartAsIs("ml") @@ -359,4 +360,16 @@ static Request putCalendar(PutCalendarRequest putCalendarRequest) throws IOExcep request.setEntity(createEntity(putCalendarRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } + + static Request 
getCalendars(GetCalendarsRequest getCalendarsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("calendars") + .addPathPart(getCalendarsRequest.getCalendarId()) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getCalendarsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index caaf1326dbdb3..5edb5115d857a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -31,6 +31,8 @@ import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCalendarsRequest; +import org.elasticsearch.client.ml.GetCalendarsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetDatafeedRequest; @@ -792,6 +794,44 @@ public void postDataAsync(PostDataRequest request, RequestOptions options, Actio Collections.emptySet()); } + /** + * Gets a single or multiple calendars. + *
<p>
+     * For additional info
+     * see ML GET calendars documentation
+     *
+     * @param request The calendars request
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return {@link GetCalendarsResponse} response object containing the {@link org.elasticsearch.client.ml.calendars.Calendar}
+     * objects and the number of calendars found
+     */
+    public GetCalendarsResponse getCalendars(GetCalendarsRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::getCalendars,
+                options,
+                GetCalendarsResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Gets a single or multiple calendars, notifies the listener once the requested calendars are retrieved.
+     * <p>
+ * For additional info + * see ML GET calendars documentation + * + * @param request The calendars request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getCalendarsAsync(GetCalendarsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getCalendars, + options, + GetCalendarsResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets the influencers for a Machine Learning Job. *
<p>
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java new file mode 100644 index 0000000000000..322efc19927dd --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.calendars.Calendar; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import org.elasticsearch.client.ml.job.util.PageParams; + +import java.io.IOException; +import java.util.Objects; + +public class GetCalendarsRequest extends ActionRequest implements ToXContentObject { + + public static final ObjectParser PARSER = + new ObjectParser<>("get_calendars_request", GetCalendarsRequest::new); + + static { + PARSER.declareString(GetCalendarsRequest::setCalendarId, Calendar.ID); + PARSER.declareObject(GetCalendarsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + private String calendarId; + private PageParams pageParams; + + public GetCalendarsRequest() { + } + + public GetCalendarsRequest(String calendarId) { + this.calendarId = calendarId; + } + + public String getCalendarId() { + return calendarId; + } + + public void setCalendarId(String calendarId) { + this.calendarId = calendarId; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (calendarId != null) { + builder.field(Calendar.ID.getPreferredName(), calendarId); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, pageParams); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetCalendarsRequest other = (GetCalendarsRequest) obj; + return Objects.equals(calendarId, other.calendarId) && Objects.equals(pageParams, other.pageParams); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsResponse.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsResponse.java new file mode 100644 index 0000000000000..e07b90f34e28f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsResponse.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.calendars.Calendar; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class GetCalendarsResponse extends AbstractResultResponse { + + public static final ParseField RESULTS_FIELD = new ParseField("calendars"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("calendars_response", true, + a -> new GetCalendarsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(constructorArg(), Calendar.PARSER, RESULTS_FIELD); + PARSER.declareLong(constructorArg(), AbstractResultResponse.COUNT); + } + + public static GetCalendarsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetCalendarsResponse(List calendars, long count) { + super(RESULTS_FIELD, calendars, count); + } + + /** + * The collection of {@link Calendar} objects found in the query + */ + public List calendars() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GetCalendarsResponse other = (GetCalendarsResponse) obj; + return Objects.equals(results, other.results) && count == other.count; + } + + @Override + public final String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java index db4483fef4bfd..c10610a872f17 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java @@ -19,16 +19,14 @@ package org.elasticsearch.client.ml.job.results; import org.elasticsearch.client.ml.job.config.Job; +import 
org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; -import java.time.format.DateTimeFormatter; import java.util.Collections; import java.util.Date; import java.util.List; @@ -90,15 +88,9 @@ public class AnomalyRecord implements ToXContentObject { static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); - PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { - if (p.currentToken() == Token.VALUE_NUMBER) { - return new Date(p.longValue()); - } else if (p.currentToken() == Token.VALUE_STRING) { - return new Date(DateFormatters.toZonedDateTime(DateTimeFormatter.ISO_INSTANT.parse(p.text())).toInstant().toEpochMilli()); - } - throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" - + Result.TIMESTAMP.getPreferredName() + "]"); - }, Result.TIMESTAMP, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> TimeUtil.parseTimeField(p, Result.TIMESTAMP.getPreferredName()), + Result.TIMESTAMP, ValueType.VALUE); PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); PARSER.declareString((anomalyRecord, s) -> {}, Result.RESULT_TYPE); PARSER.declareDouble(AnomalyRecord::setProbability, PROBABILITY); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java index 2dfed4c383403..9f549f16bbc0b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java @@ -19,16 +19,14 @@ package org.elasticsearch.client.ml.job.results; import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; -import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Collections; import java.util.Date; @@ -63,15 +61,8 @@ public class Bucket implements ToXContentObject { static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); - PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { - if (p.currentToken() == Token.VALUE_NUMBER) { - return new Date(p.longValue()); - } else if (p.currentToken() == Token.VALUE_STRING) { - return new Date(DateFormatters.toZonedDateTime(DateTimeFormatter.ISO_INSTANT.parse(p.text())).toInstant().toEpochMilli()); - } - throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" - + Result.TIMESTAMP.getPreferredName() + "]"); - }, Result.TIMESTAMP, ValueType.VALUE); + 
PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> TimeUtil.parseTimeField(p, Result.TIMESTAMP.getPreferredName()), Result.TIMESTAMP, ValueType.VALUE); PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); PARSER.declareDouble(Bucket::setAnomalyScore, ANOMALY_SCORE); PARSER.declareDouble(Bucket::setInitialAnomalyScore, INITIAL_ANOMALY_SCORE); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java index 6fc2a9b8b2d54..ade5a5a2f50f2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java @@ -19,16 +19,14 @@ package org.elasticsearch.client.ml.job.results; import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; -import java.time.format.DateTimeFormatter; import java.util.Date; import java.util.Objects; @@ -56,15 +54,9 @@ public class BucketInfluencer implements ToXContentObject { static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); - PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { - if (p.currentToken() == Token.VALUE_NUMBER) { - return new Date(p.longValue()); - } else if (p.currentToken() == Token.VALUE_STRING) { - return new Date(DateFormatters.toZonedDateTime(DateTimeFormatter.ISO_INSTANT.parse(p.text())).toInstant().toEpochMilli()); - } - throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" - + Result.TIMESTAMP.getPreferredName() + "]"); - }, Result.TIMESTAMP, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> TimeUtil.parseTimeField(p, Result.TIMESTAMP.getPreferredName()), + Result.TIMESTAMP, ValueType.VALUE); PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); PARSER.declareString((bucketInfluencer, s) -> {}, Result.RESULT_TYPE); PARSER.declareString(BucketInfluencer::setInfluencerFieldName, INFLUENCER_FIELD_NAME); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java index 28ceb243bf6b2..4892b7f93468d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java @@ -19,16 +19,14 @@ package org.elasticsearch.client.ml.job.results; import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; 
import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; -import java.time.format.DateTimeFormatter; import java.util.Date; import java.util.Objects; @@ -61,15 +59,9 @@ public class Influencer implements ToXContentObject { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); PARSER.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_NAME); PARSER.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_VALUE); - PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { - if (p.currentToken() == Token.VALUE_NUMBER) { - return new Date(p.longValue()); - } else if (p.currentToken() == Token.VALUE_STRING) { - return new Date(DateFormatters.toZonedDateTime(DateTimeFormatter.ISO_INSTANT.parse(p.text())).toInstant().toEpochMilli()); - } - throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" - + Result.TIMESTAMP.getPreferredName() + "]"); - }, Result.TIMESTAMP, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> TimeUtil.parseTimeField(p, Result.TIMESTAMP.getPreferredName()), + Result.TIMESTAMP, ValueType.VALUE); PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); PARSER.declareString((influencer, s) -> {}, Result.RESULT_TYPE); PARSER.declareDouble(Influencer::setProbability, PROBABILITY); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java index eaf050f8be9fb..722c2361b6762 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java @@ -19,16 +19,14 @@ package org.elasticsearch.client.ml.job.results; import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import java.time.format.DateTimeFormatter; import java.util.Collections; import java.util.Date; import java.util.List; @@ -56,15 +54,9 @@ public class OverallBucket implements ToXContentObject { a -> new OverallBucket((Date) a[0], (long) a[1], (double) a[2], (boolean) a[3])); static { - PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return new Date(p.longValue()); - } else if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return new Date(DateFormatters.toZonedDateTime(DateTimeFormatter.ISO_INSTANT.parse(p.text())).toInstant().toEpochMilli()); - } - throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" - + Result.TIMESTAMP.getPreferredName() + "]"); - }, Result.TIMESTAMP, ObjectParser.ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> TimeUtil.parseTimeField(p, Result.TIMESTAMP.getPreferredName()), + Result.TIMESTAMP, ObjectParser.ValueType.VALUE); 
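+        // TimeUtil.parseTimeField is assumed to mirror the inline parser removed
+        // above: a numeric token is read as epoch milliseconds and a string token
+        // as an ISO-8601 instant, each returned as a Date; any other token type
+        // throws IllegalArgumentException naming the timestamp field.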
PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); PARSER.declareDouble(ConstructingObjectParser.constructorArg(), OVERALL_SCORE); PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), Result.IS_INTERIM); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java index a7f8933a0a131..f98aef55f5bc4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java @@ -28,7 +28,6 @@ public final class Result { /** * Serialisation fields */ - public static final ParseField TYPE = new ParseField("result"); public static final ParseField RESULT_TYPE = new ParseField("result_type"); public static final ParseField TIMESTAMP = new ParseField("timestamp"); public static final ParseField IS_INTERIM = new ParseField("is_interim"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 61122901b86ee..fdd4200ee81b9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetDatafeedRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; @@ -259,7 +260,7 @@ public void testDeleteDatafeed() { assertEquals(Boolean.toString(true), request.getParameters().get("force")); } - public void testDeleteForecast() throws Exception { + public void testDeleteForecast() { String jobId = randomAlphaOfLength(10); DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(jobId); @@ -415,6 +416,28 @@ public void testPutCalendar() throws IOException { } } + public void testGetCalendars() throws IOException { + GetCalendarsRequest getCalendarsRequest = new GetCalendarsRequest(); + String expectedEndpoint = "/_xpack/ml/calendars"; + + if (randomBoolean()) { + String calendarId = randomAlphaOfLength(10); + getCalendarsRequest.setCalendarId(calendarId); + expectedEndpoint += "/" + calendarId; + } + if (randomBoolean()) { + getCalendarsRequest.setPageParams(new PageParams(10, 20)); + } + + Request request = MLRequestConverters.getCalendars(getCalendarsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(expectedEndpoint, request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetCalendarsRequest parsedRequest = GetCalendarsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getCalendarsRequest)); + } + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index 5349378e335da..e90d541b9c79a 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -32,6 +32,8 @@
 import org.elasticsearch.client.ml.FlushJobResponse;
 import org.elasticsearch.client.ml.ForecastJobRequest;
 import org.elasticsearch.client.ml.ForecastJobResponse;
+import org.elasticsearch.client.ml.GetCalendarsRequest;
+import org.elasticsearch.client.ml.GetCalendarsResponse;
 import org.elasticsearch.client.ml.GetDatafeedRequest;
 import org.elasticsearch.client.ml.GetDatafeedResponse;
 import org.elasticsearch.client.ml.GetJobRequest;
@@ -483,7 +485,6 @@ private boolean forecastExists(String jobId, String forecastId) throws Exception
     }
 
     public void testPutCalendar() throws IOException {
-
         Calendar calendar = CalendarTests.testInstance();
         MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
         PutCalendarResponse putCalendarResponse = execute(new PutCalendarRequest(calendar), machineLearningClient::putCalendar,
@@ -492,6 +493,30 @@ public void testPutCalendar() throws IOException {
         assertThat(putCalendarResponse.getCalendar(), equalTo(calendar));
     }
 
+    public void testGetCalendars() throws Exception {
+        Calendar calendar1 = CalendarTests.testInstance();
+        Calendar calendar2 = CalendarTests.testInstance();
+
+        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
+        machineLearningClient.putCalendar(new PutCalendarRequest(calendar1), RequestOptions.DEFAULT);
+        machineLearningClient.putCalendar(new PutCalendarRequest(calendar2), RequestOptions.DEFAULT);
+
+        GetCalendarsRequest getCalendarsRequest = new GetCalendarsRequest();
+        getCalendarsRequest.setCalendarId("_all");
+        GetCalendarsResponse getCalendarsResponse = execute(getCalendarsRequest, machineLearningClient::getCalendars,
+                machineLearningClient::getCalendarsAsync);
+        assertEquals(2, getCalendarsResponse.count());
+        assertEquals(2, getCalendarsResponse.calendars().size());
+        assertThat(getCalendarsResponse.calendars().stream().map(Calendar::getId).collect(Collectors.toList()),
+                hasItems(calendar1.getId(), calendar2.getId()));
+
+        getCalendarsRequest.setCalendarId(calendar1.getId());
+        getCalendarsResponse = execute(getCalendarsRequest, machineLearningClient::getCalendars,
+                machineLearningClient::getCalendarsAsync);
+        assertEquals(1, getCalendarsResponse.count());
+        assertEquals(calendar1, getCalendarsResponse.calendars().get(0));
+    }
+
     public static String randomValidJobId() {
         CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
         return generator.ofCodePointsLength(random(), 10, 10);
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
index f0f7ffd939f5d..ddaf9d8db6cc8 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
@@ -43,6 +43,8 @@
 import org.elasticsearch.client.ml.ForecastJobResponse;
 import org.elasticsearch.client.ml.GetBucketsRequest;
 import org.elasticsearch.client.ml.GetBucketsResponse;
+import org.elasticsearch.client.ml.GetCalendarsRequest;
+import org.elasticsearch.client.ml.GetCalendarsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetDatafeedRequest; @@ -880,6 +882,7 @@ public void testDeleteForecast() throws Exception { PostDataRequest postDataRequest = new PostDataRequest(job.getId(), builder); client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT); client.machineLearning().flushJob(new FlushJobRequest(job.getId()), RequestOptions.DEFAULT); + ForecastJobResponse forecastJobResponse = client.machineLearning(). forecastJob(new ForecastJobRequest(job.getId()), RequestOptions.DEFAULT); String forecastId = forecastJobResponse.getForecastId(); @@ -1526,4 +1529,66 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testGetCalendar() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + Calendar calendar = new Calendar("holidays", Collections.singletonList("job_1"), "A calendar for public holidays"); + PutCalendarRequest putRequest = new PutCalendarRequest(calendar); + client.machineLearning().putCalendar(putRequest, RequestOptions.DEFAULT); + { + //tag::x-pack-ml-get-calendars-request + GetCalendarsRequest request = new GetCalendarsRequest(); // <1> + //end::x-pack-ml-get-calendars-request + + //tag::x-pack-ml-get-calendars-id + request.setCalendarId("holidays"); // <1> + //end::x-pack-ml-get-calendars-id + + //tag::x-pack-ml-get-calendars-page + request.setPageParams(new PageParams(10, 20)); // <1> + //end::x-pack-ml-get-calendars-page + + // reset page params + request.setPageParams(null); + + //tag::x-pack-ml-get-calendars-execution + GetCalendarsResponse response = client.machineLearning().getCalendars(request, RequestOptions.DEFAULT); + //end::x-pack-ml-get-calendars-execution + + // tag::x-pack-ml-get-calendars-response + long count = response.count(); // <1> + List calendars = response.calendars(); // <2> + // end::x-pack-ml-get-calendars-response + assertEquals(1, calendars.size()); + } + { + GetCalendarsRequest request = new GetCalendarsRequest("holidays"); + + // tag::x-pack-ml-get-calendars-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetCalendarsResponse getCalendarsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-calendars-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-calendars-execute-async + client.machineLearning().getCalendarsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-calendars-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java new file mode 100644 index 0000000000000..b7ca44fd5faf1 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class GetCalendarsRequestTests extends AbstractXContentTestCase { + + @Override + protected GetCalendarsRequest createTestInstance() { + GetCalendarsRequest request = new GetCalendarsRequest(); + request.setCalendarId(randomAlphaOfLength(9)); + if (randomBoolean()) { + request.setPageParams(new PageParams(1, 2)); + } + return request; + } + + @Override + protected GetCalendarsRequest doParseInstance(XContentParser parser) { + return GetCalendarsRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsResponseTests.java new file mode 100644 index 0000000000000..fd28e410cdc94 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsResponseTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.calendars.Calendar;
+import org.elasticsearch.client.ml.calendars.CalendarTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetCalendarsResponseTests extends AbstractXContentTestCase {
+
+    @Override
+    protected GetCalendarsResponse createTestInstance() {
+        List calendars = new ArrayList<>();
+        int count = randomIntBetween(0, 3);
+        for (int i=0; i Constructing a new request for all calendars
+
+
+==== Optional Arguments
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-calendars-id]
+--------------------------------------------------
+<1> Construct a request for the single calendar `holidays`
+
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-calendars-page]
+--------------------------------------------------
+<1> The page parameters `from` and `size`. `from` specifies the number of calendars to skip.
+`size` specifies the maximum number of calendars to get. Defaults to `0` and `100` respectively.
+
+[[java-rest-high-x-pack-ml-get-calendars-execution]]
+==== Execution
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-calendars-execution]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-get-calendars-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-calendars-execute-async]
+--------------------------------------------------
+<1> The `GetCalendarsRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back with the `onResponse` method
+if the execution is successful or the `onFailure` method if the execution
+failed.
+ +A typical listener for `GetCalendarsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-calendars-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-get-calendars-response]] +==== Get calendars Response + +The returned `GetCalendarsResponse` contains the requested calendars: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-calendars-response] +-------------------------------------------------- +<1> The count of calendars that were matched +<2> The calendars retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/put-calendar.asciidoc b/docs/java-rest/high-level/ml/put-calendar.asciidoc index e6814c76fad5f..5d163f37eb465 100644 --- a/docs/java-rest/high-level/ml/put-calendar.asciidoc +++ b/docs/java-rest/high-level/ml/put-calendar.asciidoc @@ -4,7 +4,7 @@ Creates a new {ml} calendar. The API accepts a `PutCalendarRequest` and responds with a `PutCalendarResponse` object. -[[java-rest-high-x-pack-ml-get-calendars-request]] +[[java-rest-high-x-pack-ml-put-calendar-request]] ==== Put Calendar Request A `PutCalendarRequest` is constructed with a Calendar object diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 78a9f0bc7c261..2c907dd205376 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -231,6 +231,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <> * <> * <> +* <> * <> include::ml/put-job.asciidoc[] @@ -252,6 +253,7 @@ include::ml/get-records.asciidoc[] include::ml/post-data.asciidoc[] include::ml/get-influencers.asciidoc[] include::ml/get-categories.asciidoc[] +include::ml/get-calendars.asciidoc[] include::ml/put-calendar.asciidoc[] == Migration APIs From 421f58e17243a08144a1f819d2896da4cef5877b Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 18 Sep 2018 12:01:16 +0100 Subject: [PATCH 15/46] Remove discovery-file plugin (#33257) In #33241 we moved the file-based discovery functionality to core Elasticsearch, but preserved the `discovery-file` plugin, and support for the existing location of the `unicast_hosts.txt` file, for BWC reasons. This commit completes the removal of this plugin. 
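For reference, the hosts file format is unchanged by this move; only its
location is. A minimal sketch of `$ES_PATH_CONF/unicast_hosts.txt`, with
illustrative entries taken from the example file this commit deletes:

```
# one host/port entry per line, separated by `:`; a missing port defaults
# to 9300, and lines starting with `#` are comments on their own lines
10.10.10.5
10.10.10.6:9305
[2001:cdba:0000:0000:0000:0000:3257:9652]:9301
```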
--- docs/plugins/discovery-file.asciidoc | 14 ----- docs/plugins/discovery.asciidoc | 6 -- docs/reference/cat/plugins.asciidoc | 1 - .../migration/migrate_7_0/plugins.asciidoc | 10 ++- plugins/discovery-file/build.gradle | 61 ------------------- .../config/discovery-file/unicast_hosts.txt | 20 ------ .../file/FileBasedDiscoveryPlugin.java | 51 ---------------- ...leBasedDiscoveryClientYamlTestSuiteIT.java | 40 ------------ ...eBasedDiscoveryPluginDeprecationTests.java | 32 ---------- .../test/discovery_file/10_basic.yml | 13 ---- .../tests/module_and_plugin_test_cases.bash | 8 --- .../zen/FileBasedUnicastHostsProvider.java | 26 +++----- .../FileBasedUnicastHostsProviderTests.java | 46 +------------- 13 files changed, 19 insertions(+), 309 deletions(-) delete mode 100644 docs/plugins/discovery-file.asciidoc delete mode 100644 plugins/discovery-file/build.gradle delete mode 100644 plugins/discovery-file/config/discovery-file/unicast_hosts.txt delete mode 100644 plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java delete mode 100644 plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java delete mode 100644 plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java delete mode 100644 plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yml diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc deleted file mode 100644 index 4f2182da056a0..0000000000000 --- a/docs/plugins/discovery-file.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -[[discovery-file]] -=== File-Based Discovery Plugin - -The functionality provided by the `discovery-file` plugin is now available in -Elasticsearch without requiring a plugin. This plugin still exists to ensure -backwards compatibility, but it will be removed in a future version. - -On installation, this plugin creates a file at -`$ES_PATH_CONF/discovery-file/unicast_hosts.txt` that comprises comments that -describe how to use it. It is preferable not to install this plugin and instead -to create this file, and its containing directory, using standard tools. - -:plugin_name: discovery-file -include::install_remove.asciidoc[] diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index 39afbea96dc0a..46b61146b128d 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -21,10 +21,6 @@ The Azure Classic discovery plugin uses the Azure Classic API for unicast discov The Google Compute Engine discovery plugin uses the GCE API for unicast discovery. -<>:: - -The File-based discovery plugin allows providing the unicast hosts list through a dynamically updatable file. - [float] ==== Community contributed discovery plugins @@ -38,5 +34,3 @@ include::discovery-ec2.asciidoc[] include::discovery-azure-classic.asciidoc[] include::discovery-gce.asciidoc[] - -include::discovery-file.asciidoc[] diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index 9cb8332183590..265a9e270f581 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -23,7 +23,6 @@ U7321H6 analysis-stempel {version} The Stempel (Polish) Analysis plugin i U7321H6 analysis-ukrainian {version} The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch. 
U7321H6 discovery-azure-classic {version} The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism
 U7321H6 discovery-ec2           {version} The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.
-U7321H6 discovery-file          {version} Discovery file plugin enables unicast discovery from hosts stored in a file.
 U7321H6 discovery-gce           {version} The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.
 U7321H6 ingest-attachment       {version} Ingest processor that uses Apache Tika to extract contents
 U7321H6 ingest-geoip            {version} Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database
diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc
index f8434993078b1..462823a61fd00 100644
--- a/docs/reference/migration/migrate_7_0/plugins.asciidoc
+++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc
@@ -22,4 +22,12 @@ See {plugins}/repository-gcs-client.html#repository-gcs-client[Google Cloud Stor
 ==== Analysis Plugin changes
 
 * The misspelled helper method `requriesAnalysisSettings(AnalyzerProvider provider)` has been
-renamed to `requiresAnalysisSettings`
\ No newline at end of file
+renamed to `requiresAnalysisSettings`
+
+==== File-based discovery plugin
+
+* This plugin has been removed since its functionality is now part of
+Elasticsearch and requires no plugin. The location of the hosts file has moved
+from `$ES_PATH_CONF/discovery-file/unicast_hosts.txt` to
+`$ES_PATH_CONF/unicast_hosts.txt`. See <> for further information.
diff --git a/plugins/discovery-file/build.gradle b/plugins/discovery-file/build.gradle
deleted file mode 100644
index e7f2b3442716f..0000000000000
--- a/plugins/discovery-file/build.gradle
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.elasticsearch.gradle.test.ClusterConfiguration
-import org.elasticsearch.gradle.test.ClusterFormationTasks
-import org.elasticsearch.gradle.test.NodeInfo
-
-esplugin {
-  description 'Discovery file plugin enables unicast discovery from hosts stored in a file.'
- classname 'org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin' -} - -bundlePlugin { - from('config/discovery-file') { - into 'config' - } -} - -task setupSeedNodeAndUnicastHostsFile(type: DefaultTask) { - mustRunAfter(precommit) -} -// setup the initial cluster with one node that will serve as the seed node -// for unicast discovery -ClusterConfiguration config = new ClusterConfiguration(project) -config.distribution = System.getProperty('tests.distribution', 'integ-test-zip') -config.clusterName = 'discovery-file-test-cluster' -List nodes = ClusterFormationTasks.setup(project, 'initialCluster', setupSeedNodeAndUnicastHostsFile, config) -File srcUnicastHostsFile = file('build/cluster/unicast_hosts.txt') - -// write the unicast_hosts.txt file to a temporary location to be used by the second cluster -setupSeedNodeAndUnicastHostsFile.doLast { - // write the unicast_hosts.txt file to a temp file in the build directory - srcUnicastHostsFile.setText(nodes.get(0).transportUri(), 'UTF-8') -} - -// second cluster, which will connect to the first via the unicast_hosts.txt file -integTestCluster { - dependsOn setupSeedNodeAndUnicastHostsFile - clusterName = 'discovery-file-test-cluster' - setting 'discovery.zen.hosts_provider', 'file' - extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile -} - -integTestRunner.finalizedBy ':plugins:discovery-file:initialCluster#stop' - diff --git a/plugins/discovery-file/config/discovery-file/unicast_hosts.txt b/plugins/discovery-file/config/discovery-file/unicast_hosts.txt deleted file mode 100644 index 5e265e0f295a9..0000000000000 --- a/plugins/discovery-file/config/discovery-file/unicast_hosts.txt +++ /dev/null @@ -1,20 +0,0 @@ -# The unicast_hosts.txt file contains the list of unicast hosts to connect to -# for pinging during the discovery process, when using the file-based discovery -# mechanism. This file should contain one entry per line, where an entry is a -# host/port combination. The host and port should be separated by a `:`. If -# the port is left off, a default port of 9300 is assumed. For example, if the -# cluster has three nodes that participate in the discovery process: -# (1) 66.77.88.99 running on port 9300 (2) 66.77.88.100 running on port 9305 -# and (3) 66.77.88.99 running on port 10005, then this file should contain the -# following text: -# -#10.10.10.5 -#10.10.10.6:9305 -#10.10.10.5:10005 -# -# For IPv6 addresses, make sure to put a bracket around the host part of the address, -# for example: [2001:cdba:0000:0000:0000:0000:3257:9652]:9301 (where 9301 is the port). -# -# NOTE: all lines starting with a `#` are comments, and comments must exist -# on lines of their own (i.e. comments cannot begin in the middle of a line) -# \ No newline at end of file diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java deleted file mode 100644 index 48fa49b9a8a35..0000000000000 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.file; - -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.TransportService; - -import java.util.Collections; -import java.util.Map; -import java.util.function.Supplier; - -public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - - private final DeprecationLogger deprecationLogger; - static final String DEPRECATION_MESSAGE - = "File-based discovery is now built into Elasticsearch and does not require the discovery-file plugin"; - - public FileBasedDiscoveryPlugin(Settings settings) { - deprecationLogger = new DeprecationLogger(Loggers.getLogger(this.getClass(), settings)); - } - - @Override - public Map> getZenHostsProviders(TransportService transportService, - NetworkService networkService) { - deprecationLogger.deprecated(DEPRECATION_MESSAGE); - return Collections.emptyMap(); - } -} diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java deleted file mode 100644 index d2ac2095bdfc9..0000000000000 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.file; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -/** - * Integration tests to make sure the file-based discovery plugin works in a cluster. 
- */ -public class FileBasedDiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - - public FileBasedDiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } -} diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java deleted file mode 100644 index 643c7b2c95c27..0000000000000 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.file; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; - -import static org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin.DEPRECATION_MESSAGE; - -public class FileBasedDiscoveryPluginDeprecationTests extends ESTestCase { - public void testDeprecationWarning() { - new FileBasedDiscoveryPlugin(Settings.EMPTY).getZenHostsProviders(null, null); - assertWarnings(DEPRECATION_MESSAGE); - } -} diff --git a/plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yml b/plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yml deleted file mode 100644 index 138115da1123f..0000000000000 --- a/plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yml +++ /dev/null @@ -1,13 +0,0 @@ -# Integration tests for file-based discovery -# -"Ensure cluster formed successfully with discovery file": - # make sure both nodes joined the cluster - - do: - cluster.health: - wait_for_nodes: 2 - - # make sure the cluster was formed with the correct name - - do: - cluster.state: {} - - - match: { cluster_name: 'discovery-file-test-cluster' } # correct cluster name, we formed the cluster we expected to diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 7aeb03851a5d9..9a1ff6f2e2349 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -224,10 +224,6 @@ fi install_and_check_plugin discovery ec2 aws-java-sdk-core-*.jar } -@test "[$GROUP] install discovery-file plugin" { - install_and_check_plugin discovery file -} - @test "[$GROUP] install ingest-attachment plugin" { # we specify the 
version on the poi-3.17.jar so that the test does # not spuriously pass if the jar is missing but the other poi jars @@ -364,10 +360,6 @@ fi remove_plugin discovery-ec2 } -@test "[$GROUP] remove discovery-file plugin" { - remove_plugin discovery-file -} - @test "[$GROUP] remove ingest-attachment plugin" { remove_plugin ingest-attachment } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java index f339ae43a703e..f9b20580ecd9b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -49,23 +49,21 @@ public class FileBasedUnicastHostsProvider extends AbstractComponent implements public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; private final Path unicastHostsFilePath; - private final Path legacyUnicastHostsFilePath; public FileBasedUnicastHostsProvider(Settings settings, Path configFile) { super(settings); this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE); - this.legacyUnicastHostsFilePath = configFile.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); } private List getHostsList() { if (Files.exists(unicastHostsFilePath)) { - return readFileContents(unicastHostsFilePath); - } - - if (Files.exists(legacyUnicastHostsFilePath)) { - deprecationLogger.deprecated("Found dynamic hosts list at [{}] but this path is deprecated. This list should be at [{}] " + - "instead. Support for the deprecated path will be removed in future.", legacyUnicastHostsFilePath, unicastHostsFilePath); - return readFileContents(legacyUnicastHostsFilePath); + try (Stream lines = Files.lines(unicastHostsFilePath)) { + return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments + .collect(Collectors.toList()); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e); + return Collections.emptyList(); + } } logger.warn("expected, but did not find, a dynamic hosts list at [{}]", unicastHostsFilePath); @@ -73,16 +71,6 @@ private List getHostsList() { return Collections.emptyList(); } - private List readFileContents(Path path) { - try (Stream lines = Files.lines(path)) { - return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments - .collect(Collectors.toList()); - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e); - return Collections.emptyList(); - } - } - @Override public List buildDynamicHosts(HostsResolver hostsResolver) { final List transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index 8922a38ea1e78..b45daaadfa576 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -52,11 +52,9 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { - private boolean legacyLocation; private ThreadPool threadPool; private ExecutorService executorService; private MockTransportService transportService; - private Path 
configPath; @Before public void setUp() throws Exception { @@ -108,24 +106,12 @@ public void testBuildDynamicNodes() throws Exception { assertEquals(9300, nodes.get(2).getPort()); } - public void testBuildDynamicNodesLegacyLocation() throws Exception { - legacyLocation = true; - testBuildDynamicNodes(); - assertDeprecatedLocationWarning(); - } - public void testEmptyUnicastHostsFile() throws Exception { final List hostEntries = Collections.emptyList(); final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } - public void testEmptyUnicastHostsFileLegacyLocation() throws Exception { - legacyLocation = true; - testEmptyUnicastHostsFile(); - assertDeprecatedLocationWarning(); - } - public void testUnicastHostsDoesNotExist() { final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath()); @@ -141,12 +127,6 @@ public void testInvalidHostEntries() throws Exception { assertEquals(0, addresses.size()); } - public void testInvalidHostEntriesLegacyLocation() throws Exception { - legacyLocation = true; - testInvalidHostEntries(); - assertDeprecatedLocationWarning(); - } - public void testSomeInvalidHostEntries() throws Exception { final List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); final List addresses = setupAndRunHostProvider(hostEntries); @@ -155,12 +135,6 @@ public void testSomeInvalidHostEntries() throws Exception { assertEquals(9301, addresses.get(0).getPort()); } - public void testSomeInvalidHostEntriesLegacyLocation() throws Exception { - legacyLocation = true; - testSomeInvalidHostEntries(); - assertDeprecatedLocationWarning(); - } - // sets up the config dir, writes to the unicast hosts file in the config dir, // and then runs the file-based unicast host provider to get the list of discovery nodes private List setupAndRunHostProvider(final List hostEntries) throws IOException { @@ -168,15 +142,9 @@ private List setupAndRunHostProvider(final List hostEn final Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) .build(); - if (randomBoolean()) { - configPath = homeDir.resolve("config"); - } else { - configPath = createTempDir(); - } - final Path discoveryFilePath = legacyLocation ? configPath.resolve("discovery-file") : configPath; - Files.createDirectories(discoveryFilePath); - final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE); - try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) { + final Path configPath = randomBoolean() ? homeDir.resolve("config") : createTempDir(); + Files.createDirectories(configPath); + try (BufferedWriter writer = Files.newBufferedWriter(configPath.resolve(UNICAST_HOSTS_FILE))) { writer.write(String.join("\n", hostEntries)); } @@ -184,12 +152,4 @@ private List setupAndRunHostProvider(final List hostEn UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); } - - private void assertDeprecatedLocationWarning() { - assertWarnings("Found dynamic hosts list at [" + - configPath.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE) + - "] but this path is deprecated. This list should be at [" + - configPath.resolve(UNICAST_HOSTS_FILE) + - "] instead. 
Support for the deprecated path will be removed in future."); - } } From 0d4683850c069d973050880f63970c7fa57b72af Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 18 Sep 2018 14:12:18 +0300 Subject: [PATCH 16/46] Moved the problematic tests to the tests file that is not considered when certain locales are used (#33785) --- x-pack/qa/sql/src/main/resources/case-functions.sql-spec | 6 ++++++ .../qa/sql/src/main/resources/string-functions.sql-spec | 8 -------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/x-pack/qa/sql/src/main/resources/case-functions.sql-spec b/x-pack/qa/sql/src/main/resources/case-functions.sql-spec index 899d7cb0a6cb1..f18f9c7eaa18d 100644 --- a/x-pack/qa/sql/src/main/resources/case-functions.sql-spec +++ b/x-pack/qa/sql/src/main/resources/case-functions.sql-spec @@ -11,3 +11,9 @@ SELECT CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT upperCasingTheSecondLetterFromTheRightFromFirstNameWithWhere SELECT CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT(RIGHT("first_name",2),1))),RIGHT("first_name",1)) f, COUNT(*) c FROM "test_emp" WHERE CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT(RIGHT("first_name",2),1))),RIGHT("first_name",1))='AlejandRo' GROUP BY CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT(RIGHT("first_name",2),1))),RIGHT("first_name",1)) ORDER BY CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT(RIGHT("first_name",2),1))),RIGHT("first_name",1)) LIMIT 10; + +ucaseInline1 +SELECT UCASE('ElAsTiC') upper; + +ucaseInline3 +SELECT UCASE(' elastic ') upper; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec b/x-pack/qa/sql/src/main/resources/string-functions.sql-spec index 8fe357804430b..f039e5c487e2f 100644 --- a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec +++ b/x-pack/qa/sql/src/main/resources/string-functions.sql-spec @@ -157,17 +157,9 @@ SELECT SUBSTRING('Elasticsearch', 10, 10) sub; ucaseFilter SELECT UCASE(gender) uppercased, COUNT(*) count FROM "test_emp" WHERE UCASE(gender) = 'F' GROUP BY UCASE(gender); -//https://github.com/elastic/elasticsearch/issues/33687 -//ucaseInline1 -//SELECT UCASE('ElAsTiC') upper; - ucaseInline2 SELECT UCASE('') upper; -//https://github.com/elastic/elasticsearch/issues/33687 -//ucaseInline3 -//SELECT UCASE(' elastic ') upper; - // // Group and order by // From 8e0d74adadab17b8aff4dfdf9e5a37fcaffa145f Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 18 Sep 2018 12:56:37 +0100 Subject: [PATCH 17/46] [ML][HLRC] Remove deleted property from Job (#33763) The deleted property is meant to be used internally. Users of the client should not need to interact with that property.
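For illustration, a minimal sketch of why dropping the field is safe for client users: the `PARSER` in the diff below is a lenient `ObjectParser` (note the `true` argument), so a response from an older server that still carries `deleted` is simply skipped over. The sketch uses Jackson instead of the client's own XContent machinery, and the `JobView` class and sample JSON are hypothetical stand-ins, not part of this change:

```java
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public class LenientJobParsingSketch {

    // Hypothetical client-side view of a job; note there is no "deleted" field.
    public static class JobView {
        public String job_id;
        public String description;
    }

    public static void main(String[] args) throws Exception {
        // A response from an older server that still carries the internal flag.
        String json = "{\"job_id\":\"job-1\",\"description\":\"demo\",\"deleted\":false}";

        // Same idea as ObjectParser's ignoreUnknownFields=true in the client:
        // unknown properties are skipped instead of failing the parse.
        ObjectMapper mapper = new ObjectMapper()
                .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

        JobView job = mapper.readValue(json, JobView.class);
        System.out.println(job.job_id + " / " + job.description); // job-1 / demo
    }
}
```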
--- .../client/ml/job/config/Job.java | 27 +++---------------- 1 file changed, 4 insertions(+), 23 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java index aff74271f1c0b..7740d8cfc5117 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java @@ -66,7 +66,6 @@ public class Job implements ToXContentObject { public static final ParseField RESULTS_RETENTION_DAYS = new ParseField("results_retention_days"); public static final ParseField MODEL_SNAPSHOT_ID = new ParseField("model_snapshot_id"); public static final ParseField RESULTS_INDEX_NAME = new ParseField("results_index_name"); - public static final ParseField DELETED = new ParseField("deleted"); public static final ObjectParser PARSER = new ObjectParser<>("job_details", true, Builder::new); @@ -100,7 +99,6 @@ public class Job implements ToXContentObject { PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT); PARSER.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); PARSER.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME); - PARSER.declareBoolean(Builder::setDeleted, DELETED); } private final String jobId; @@ -123,14 +121,13 @@ public class Job implements ToXContentObject { private final Map customSettings; private final String modelSnapshotId; private final String resultsIndexName; - private final boolean deleted; private Job(String jobId, String jobType, List groups, String description, Date createTime, Date finishedTime, Date lastDataTime, Long establishedModelMemory, AnalysisConfig analysisConfig, AnalysisLimits analysisLimits, DataDescription dataDescription, ModelPlotConfig modelPlotConfig, Long renormalizationWindowDays, TimeValue backgroundPersistInterval, Long modelSnapshotRetentionDays, Long resultsRetentionDays, Map customSettings, - String modelSnapshotId, String resultsIndexName, boolean deleted) { + String modelSnapshotId, String resultsIndexName) { this.jobId = jobId; this.jobType = jobType; @@ -151,7 +148,6 @@ private Job(String jobId, String jobType, List groups, String descriptio this.customSettings = customSettings == null ? 
null : Collections.unmodifiableMap(customSettings); this.modelSnapshotId = modelSnapshotId; this.resultsIndexName = resultsIndexName; - this.deleted = deleted; } /** @@ -296,10 +292,6 @@ public String getModelSnapshotId() { return modelSnapshotId; } - public boolean isDeleted() { - return deleted; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -359,9 +351,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (resultsIndexName != null) { builder.field(RESULTS_INDEX_NAME.getPreferredName(), resultsIndexName); } - if (params.paramAsBoolean("all", false)) { - builder.field(DELETED.getPreferredName(), deleted); - } builder.endObject(); return builder; } @@ -395,8 +384,7 @@ public boolean equals(Object other) { && Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays) && Objects.equals(this.customSettings, that.customSettings) && Objects.equals(this.modelSnapshotId, that.modelSnapshotId) - && Objects.equals(this.resultsIndexName, that.resultsIndexName) - && Objects.equals(this.deleted, that.deleted); + && Objects.equals(this.resultsIndexName, that.resultsIndexName); } @Override @@ -404,7 +392,7 @@ public int hashCode() { return Objects.hash(jobId, jobType, groups, description, createTime, finishedTime, lastDataTime, establishedModelMemory, analysisConfig, analysisLimits, dataDescription, modelPlotConfig, renormalizationWindowDays, backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings, - modelSnapshotId, resultsIndexName, deleted); + modelSnapshotId, resultsIndexName); } @Override @@ -437,7 +425,6 @@ public static class Builder { private Map customSettings; private String modelSnapshotId; private String resultsIndexName; - private boolean deleted; private Builder() { } @@ -466,7 +453,6 @@ public Builder(Job job) { this.customSettings = job.getCustomSettings(); this.modelSnapshotId = job.getModelSnapshotId(); this.resultsIndexName = job.getResultsIndexNameNoPrefix(); - this.deleted = job.isDeleted(); } public Builder setId(String id) { @@ -573,11 +559,6 @@ public Builder setResultsIndexName(String resultsIndexName) { return this; } - public Builder setDeleted(boolean deleted) { - this.deleted = deleted; - return this; - } - /** * Builds a job. 
* @@ -590,7 +571,7 @@ public Job build() { id, jobType, groups, description, createTime, finishedTime, lastDataTime, establishedModelMemory, analysisConfig, analysisLimits, dataDescription, modelPlotConfig, renormalizationWindowDays, backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings, - modelSnapshotId, resultsIndexName, deleted); + modelSnapshotId, resultsIndexName); } } } From 9fe5a273aac70d0685300de1d012566fbdada4f9 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 18 Sep 2018 15:55:16 +0200 Subject: [PATCH 18/46] [TEST] handle failed search requests differently --- .../elasticsearch/xpack/ccr/FollowIndexSecurityIT.java | 9 ++++++--- .../org/elasticsearch/xpack/ccr/FollowIndexIT.java | 10 +++++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index 60b9f8f23e8b3..a49ddd1dbef9e 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -274,11 +274,14 @@ private static void unfollowIndex(String followIndex) throws IOException { } private static void verifyCcrMonitoring(String expectedLeaderIndex, String expectedFollowerIndex) throws IOException { - ensureYellow(".monitoring-*"); - Request request = new Request("GET", "/.monitoring-*/_search"); request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"leader_cluster:" + expectedLeaderIndex + "\"}}}"); - Map response = toMap(adminClient().performRequest(request)); + Map response; + try { + response = toMap(adminClient().performRequest(request)); + } catch (ResponseException e) { + throw new AssertionError("error while searching", e); + } int numberOfOperationsReceived = 0; int numberOfOperationsIndexed = 0; diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index c7ecbe184de88..73a15410b07ba 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -9,6 +9,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -160,11 +161,14 @@ private static void verifyDocuments(String index, int expectedNumDocs) throws IO } private static void verifyCcrMonitoring(final String expectedLeaderIndex, final String expectedFollowerIndex) throws IOException { - ensureYellow(".monitoring-*"); - Request request = new Request("GET", "/.monitoring-*/_search"); request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"leader_cluster:" + expectedLeaderIndex + "\"}}}"); - Map response = toMap(client().performRequest(request)); + Map response; + try { + response = toMap(client().performRequest(request)); + } catch (ResponseException e) { + throw new AssertionError("error 
while searching", e); + } int numberOfOperationsReceived = 0; int numberOfOperationsIndexed = 0; From 3596512e6a3eeb74316f035d0dea1b31eafb60f3 Mon Sep 17 00:00:00 2001 From: Dan Tennery-Spalding Date: Tue, 18 Sep 2018 07:46:22 -0700 Subject: [PATCH 19/46] [DOCS] Corrected several grammar errors (#33781) --- docs/reference/search/request/sort.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index 2cee0f3a58cd6..544bea86b0dae 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -1,7 +1,7 @@ [[search-request-sort]] === Sort -Allows to add one or more sort on specific fields. Each sort can be +Allows you to add one or more sorts on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for `_score` to sort by score, and `_doc` to sort by index order. @@ -223,7 +223,7 @@ scripts and sorting by geo distance. ==== Missing Values The `missing` parameter specifies how docs which are missing -the field should be treated: The `missing` value can be +the sort field should be treated: The `missing` value can be set to `_last`, `_first`, or a custom value (that will be used for missing docs as the sort value). The default is `_last`. @@ -250,7 +250,7 @@ the `nested_filter` then a missing value is used. ==== Ignoring Unmapped Fields By default, the search request will fail if there is no mapping -associated with a field. The `unmapped_type` option allows to ignore +associated with a field. The `unmapped_type` option allows you to ignore fields that have no mapping and not sort by them. The value of this parameter is used to determine what sort values to emit. Here is an example of how it can be used: @@ -322,7 +322,7 @@ GET /_search `ignore_unmapped`:: Indicates if the unmapped field should be treated as a missing value. Setting it to `true` is equivalent to specifying - an `unmapped_type` in the field sort. The default is `false` (unmapped field are causing the search to fail). + an `unmapped_type` in the field sort. The default is `false` (unmapped field cause the search to fail). NOTE: geo distance sorting does not support configurable missing values: the distance will always be considered equal to +Infinity+ when a document does not From 32ee6148d2eb1865e491e162ad5bd7e595cf0bb8 Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Tue, 18 Sep 2018 16:57:33 +0200 Subject: [PATCH 20/46] [DOCS] Clarify scoring for multi_match phrase type (#32672) The original statement "Runs a match_phrase query on each field and combines the _score from each field." for the phrase type is a but misleading. The phrase type behaves like the best_fields type and does not combine the scores of each fields. --- docs/reference/query-dsl/multi-match-query.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index edb6ff11da7de..296689db289d4 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -83,8 +83,8 @@ parameter, which can be set to: were one big field. Looks for each word in *any* field. See <>. -`phrase`:: Runs a `match_phrase` query on each field and combines - the `_score` from each field. See <>. 
+`phrase`:: Runs a `match_phrase` query on each field and uses the `_score` + from the best field. See <>. `phrase_prefix`:: Runs a `match_phrase_prefix` query on each field and combines the `_score` from each field. See <>. From 3928921a1d38ccff2bd215f2cccd5876b50503b8 Mon Sep 17 00:00:00 2001 From: Tim Heckel Date: Tue, 18 Sep 2018 09:59:26 -0500 Subject: [PATCH 21/46] [DOCS] Update scroll.asciidoc (#32530) --- docs/reference/search/request/scroll.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 0fd6979ef9568..c2d6dab550cc7 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -79,8 +79,8 @@ next batch of results until there are no more results left to return, ie the `hits` array is empty. IMPORTANT: The initial search request and each subsequent scroll request each -return a `_scroll_id`, which may change with each request -- only the most -recent `_scroll_id` should be used. +return a `_scroll_id`. While the `_scroll_id` may change between requests, it doesn’t +always change — in any case, only the most recently received `_scroll_id` should be used. NOTE: If the request specifies aggregations, only the initial search response will contain the aggregations results. From 7bed91549997a5f5736eae4bae1eb7f3f38843d7 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 18 Sep 2018 17:05:10 +0200 Subject: [PATCH 22/46] [DOCS] Fixed list formatting (#32963) --- .../resources/rest-api-spec/test/README.asciidoc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index 3ee0340387496..d4b04ce25110b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -19,9 +19,10 @@ Test file structure -------------------- A YAML test file consists of: -* an optional `setup` section, followed by -* an optional `teardown` section, followed by -* one or more test sections + +- an optional `setup` section, followed by +- an optional `teardown` section, followed by +- one or more test sections For instance: @@ -216,11 +217,13 @@ sent to nodes that match the `node_selector`. It looks like this: If you list multiple selectors then the request will only go to nodes that match all of those selectors. The following selectors are supported: -* `version`: Only nodes who's version is within the range will receive the + +- `version`: Only nodes who's version is within the range will receive the request. The syntax for the pattern is the same as when `version` is within `skip`. -* `attribute`: Only nodes that have an attribute matching the name and value -of the provided attribute match. Looks like: +- `attribute`: Only nodes that have an attribute matching the name and value +of the provided attribute match. +Looks like: .... node_selector: attribute: From 91e45ca21b9490fe46d39aa6240a289a59304ab8 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Tue, 18 Sep 2018 18:51:48 +0300 Subject: [PATCH 23/46] SQL: Better handling of number parsing exceptions (#33776) Add proper exceptions in case the parsing of numbers (too large, invalid format) fails. 
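For illustration, a standalone sketch of the parsing strategy used in the diff below: try the fast path first, then use `BigInteger` to distinguish an overflow from an outright invalid number. A plain `IllegalArgumentException` stands in for the SQL `ParsingException`:

```java
import java.math.BigInteger;

public class NumberParsingSketch {

    // Mirrors the approach in the change: Long.parseLong first, then
    // BigInteger to tell "too large for a long" apart from "not a number".
    static long parseLongOrExplain(String text) {
        try {
            return Long.parseLong(text);
        } catch (NumberFormatException nfe) {
            try {
                new BigInteger(text).longValueExact();
            } catch (ArithmeticException ae) {
                throw new IllegalArgumentException("Number [" + text + "] is too large");
            } catch (NumberFormatException ex) {
                // not a valid integer at all; fall through to the generic error
            }
            throw new IllegalArgumentException("Cannot parse number [" + text + "]");
        }
    }

    public static void main(String[] args) {
        System.out.println(parseLongOrExplain("42"));            // 42
        try {
            parseLongOrExplain("123456789098765432101");         // overflows a long
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```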
Close #33622 --- .../xpack/sql/parser/ExpressionBuilder.java | 42 ++++++++++++++++--- .../xpack/sql/parser/ExpressionTests.java | 19 +++++++++ 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index 0c7ecbc7ddf6b..2719d39bbecb2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -95,7 +95,7 @@ import org.joda.time.format.DateTimeFormatterBuilder; import org.joda.time.format.ISODateTimeFormat; -import java.math.BigDecimal; +import java.math.BigInteger; import java.util.List; import java.util.Locale; import java.util.Map; @@ -458,7 +458,13 @@ public Expression visitNullLiteral(NullLiteralContext ctx) { @Override public Expression visitBooleanLiteral(BooleanLiteralContext ctx) { - return new Literal(source(ctx), Booleans.parseBoolean(ctx.getText().toLowerCase(Locale.ROOT), false), DataType.BOOLEAN); + boolean value; + try { + value = Booleans.parseBoolean(ctx.getText().toLowerCase(Locale.ROOT), false); + } catch(IllegalArgumentException iae) { + throw new ParsingException(source(ctx), iae.getMessage()); + } + return new Literal(source(ctx), Boolean.valueOf(value), DataType.BOOLEAN); } @Override @@ -472,14 +478,40 @@ public Expression visitStringLiteral(StringLiteralContext ctx) { @Override public Literal visitDecimalLiteral(DecimalLiteralContext ctx) { - return new Literal(source(ctx), new BigDecimal(ctx.getText()).doubleValue(), DataType.DOUBLE); + double value; + try { + value = Double.parseDouble(ctx.getText()); + } catch (NumberFormatException nfe) { + throw new ParsingException(source(ctx), "Cannot parse number [{}]", ctx.getText()); + } + if (Double.isInfinite(value)) { + throw new ParsingException(source(ctx), "Number [{}] is too large", ctx.getText()); + } + if (Double.isNaN(value)) { + throw new ParsingException(source(ctx), "[{}] cannot be parsed as a number (NaN)", ctx.getText()); + } + return new Literal(source(ctx), Double.valueOf(value), DataType.DOUBLE); } @Override public Literal visitIntegerLiteral(IntegerLiteralContext ctx) { - BigDecimal bigD = new BigDecimal(ctx.getText()); + long value; + try { + value = Long.parseLong(ctx.getText()); + } catch (NumberFormatException nfe) { + try { + BigInteger bi = new BigInteger(ctx.getText()); + try { + bi.longValueExact(); + } catch (ArithmeticException ae) { + throw new ParsingException(source(ctx), "Number [{}] is too large", ctx.getText()); + } + } catch (NumberFormatException ex) { + // parsing fails, go through + } + throw new ParsingException(source(ctx), "Cannot parse number [{}]", ctx.getText()); + } - long value = bigD.longValueExact(); DataType type = DataType.LONG; // try to downsize to int if possible (since that's the most common type) if ((int) value == value) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java index 004118e8cd2d9..466e749c9a3cd 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java @@ -22,6 +22,15 @@ public void testTokenFunctionName() throws Exception { assertEquals("LEFT", 
uf.functionName()); } + + public void testLiteralBoolean() throws Exception { + Expression lt = parser.createExpression("TRUE"); + assertEquals(Literal.class, lt.getClass()); + Literal l = (Literal) lt; + assertEquals(Boolean.TRUE, l.value()); + assertEquals(DataType.BOOLEAN, l.dataType()); + } + public void testLiteralDouble() throws Exception { Expression lt = parser.createExpression(String.valueOf(Double.MAX_VALUE)); assertEquals(Literal.class, lt.getClass()); @@ -92,4 +101,14 @@ public void testLiteralIntegerWithByteValue() throws Exception { assertEquals(Integer.valueOf(Byte.MAX_VALUE), l.value()); assertEquals(DataType.INTEGER, l.dataType()); } + + public void testLiteralIntegerInvalid() throws Exception { + ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("123456789098765432101")); + assertEquals("Number [123456789098765432101] is too large", ex.getErrorMessage()); + } + + public void testLiteralDecimalTooBig() throws Exception { + ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("1.9976931348623157e+308")); + assertEquals("Number [1.9976931348623157e+308] is too large", ex.getErrorMessage()); + } } \ No newline at end of file From bc12a948b5c7914f479d0de4cf3d4e3fa7a3fcf2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 18 Sep 2018 12:04:35 -0400 Subject: [PATCH 24/46] Checkstyle: Package declarations (#33784) Make sure that all java files have a package declaration and that all of the package declarations line up with the directory structure. This would have caught the bug that I caused in 190ea9a6def9082348d983b16420ef02607d4c17 and fixed in b6d68bd805f1858a0210e381402236dea1d42509. --- buildSrc/src/main/resources/checkstyle.xml | 6 ++++++ buildSrc/src/main/resources/checkstyle_suppressions.xml | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml index e1000b3e4a9f8..939d48e72ce21 100644 --- a/buildSrc/src/main/resources/checkstyle.xml +++ b/buildSrc/src/main/resources/checkstyle.xml @@ -62,7 +62,13 @@ --> + + + + + + From 241c74efb26b3d4a774bc9e8b8450d029c91a83a Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 18 Sep 2018 18:16:40 +0200 Subject: [PATCH 25/46] upgrade to a new snapshot of Lucene 8 (7d0a7782fa) (#33812) --- buildSrc/version.properties | 2 +- .../lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + ...lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - ...lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + ...lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - ...lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + ...cene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - ...cene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - 
.../lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + .../licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 | 1 - .../licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 | 1 + 49 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 
plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fee9a25aa352e..5b611980f1cb1 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 8.0.0-snapshot-66c671ea80 +lucene = 8.0.0-snapshot-7d0a7782fa # 
optional dependencies spatial4j = 0.7 diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 047bca7b614bf..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58b9db095c569b4c4da491810f14e1429878b594 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..4904c89e62f89 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +cc072b68aac06a2fb9569ab7adce05302f130948 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 7369f427ab208..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f009ee188453aabae77fad55aea08bc60323bb3e \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..abc772945b1b4 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +429eb7e780c5a6e5200041a1f5b98bccd2623aaf \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 16417bbebd1c2..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af3d2ae975e3560c1ea69222d6c46072857952ba \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..e103c8c0c7c41 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +837fca1b1d7ca1dc002e53171801526644e52818 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 9c3524a6789f8..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f17bc5e532d9dc2786a13bd577df64023d1baae1 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..b7a23ee518fcb --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ 
+1dde903172ade259cb26cbe320c25bc1d1356f89 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index ac81fdd07c2e4..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ad89d33c1cd960c91afa05b22024137fe108567 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..08b07e7c2f498 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +b6ca20e96a989e6e6706b8b7b8ad8c82d2a03576 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index f00a29e781618..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f11fb254256d74e911b953994b47e7a95915954 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..3f6fed19af1aa --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +c96a2f25dea18b383423a41aca296734353d4bbd \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 76fa8e90eae98..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b2348d140ef0c3e674cb81173f61c5e5f430facb \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..5dc03672c8753 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +09363c5ce111d024a6da22a5ea8dbaf54d91dbd0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 0e2c4d34ef041..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -485a0c3be58a5942b4a28639f1019181ef4cd0e3 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 
0000000000000..e940b50d640e1 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +13c3840d49480014118de99ef6e07a9e55c50172 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 72f7319e6af4a..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a22f1c6749ca4a3fbc9b330161a8ea3301cac8de \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..4d9522f10de5b --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +dce55e44af096cb9029cb26d22a14d8a9c5223ce \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index f4bf99b4a03a5..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41ce415b93d75662cc2e790d09120bc0234d6b1b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..c86294acf5a3e --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +d1d941758dc91ea7c2d515dd97b5d9b23b0f1874 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 50a21f5c504a2..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06c1e4fa838807059d27aaf5405cfdfe7303369c \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..75200bc0c1525 --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +e884b8ce62a2102b24bfdbe8911674cd5b0d06d9 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 76bdfa1c6c4bc..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b0a019a938deb58160647e7640b348bb99c10a8 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..b1ae597fadfb7 --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +3870972c07d7fa41a3bc58eb65952da53a16a406 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 017225c0e467d..0000000000000 --- 
a/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d813f3ba0ddd56bac728edb88ed8875e6acfd18 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..02935671ce899 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +b8f0b73cfd01fc48735f1e06f16f7ccb47fc183e \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 29cdbbfe69f3c..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -00c7e20b6a35ebecc875dd52bfb324967c5555d6 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..fdfab321a6791 --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +1d253fae720355e2ff40d529d62c2b3de403d0d0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 49087293afa7c..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4dbff54a0befdc7d67c0f39890586c220df718e \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..d7c9cdf3e41d6 --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +d9ca14bcda331a425d2d7c16022fdfd1c6942924 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 3c12235dff678..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74d17f6bdf1fa4d499f02904432aa3b1024bde88 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..93ec704aeaeb0 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +200454bbfe5ec93d941d9a9d27703883122a4522 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index a423deb397de6..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bec78be38f777765146c35f65e247909563d6814 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..d57b6be7fbf31 --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +47915a125e54c845a4b540201cda88dc7612da08 \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 79195ed1d5e1c..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74b76f8fed44400bc2a5d938ca2611a97b4d7a7c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..0ed04b6f69b41 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +e5d49e1c6ee7550234539314e600e2893e13cb80 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index d5cd94b7fe5d6..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f65fa728b3bc924db6538f4c3caf2fcd25451cf \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..41c6a4a243ed7 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +68081b60905f1b53b3705b9cfa4403b8aba44352 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 76857b72f012b..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -916a91f0cab2d3684707c59e9adca7b3030b2c66 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..63734717b2fbc --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +c99d56a453cecc7258300fd04b438713b944f1b9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 7ab84df992bc4..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb3e630d6013e41838fb277943ce921f256f1c61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..3fa056da3db0a --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +2471966478f829b6455556346014f02ff59f50c0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index d793f4c54d9d1..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa10ff14eab2f579cff2f0fa33c9c7f3b24daf12 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 
b/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..bd3d2e719a0ae --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +46e012be699251306ad13f4582c30d79cea4b307 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 0ea0c2fb573fd..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3dd65ca6612b4f98530847b99ab348fd83055fdf \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..8a4fc23cfcdae --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +dea19dd9e971d2a0171e7d78662f732b45148a27 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 deleted file mode 100644 index 50a21f5c504a2..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06c1e4fa838807059d27aaf5405cfdfe7303369c \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 new file mode 100644 index 0000000000000..75200bc0c1525 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 @@ -0,0 +1 @@ +e884b8ce62a2102b24bfdbe8911674cd5b0d06d9 \ No newline at end of file From 98ccd9496275591fb81b5a24e98f5bd7ac3dc442 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 18 Sep 2018 19:53:26 +0200 Subject: [PATCH 26/46] Factor out a ChannelActionListener (#33819) We use similar / same concepts in SearchTransportService and HandledTransportAction but both duplicate the effort with slightly different implementation details. This streamlines sending responses / exceptions back to a channel in an ActionListener with appropriate logging.
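For illustration, a self-contained sketch of the pattern being extracted, with simplified stand-ins for `ActionListener` and `TransportChannel` (the real signatures are in the diff below, and the action name used here is just a placeholder):

```java
public class ChannelListenerSketch {

    // Stand-in for TransportChannel: can send either a response or a failure.
    interface Channel {
        void sendResponse(Object response) throws Exception;
        void sendFailure(Exception error) throws Exception;
    }

    // The extracted pattern: an ActionListener-like callback that forwards
    // its outcome over the channel and logs if even that fails.
    static final class ChannelListener<R> {
        private final Channel channel;
        private final String actionName;

        ChannelListener(Channel channel, String actionName) {
            this.channel = channel;
            this.actionName = actionName;
        }

        void onResponse(R response) {
            try {
                channel.sendResponse(response);
            } catch (Exception e) {
                onFailure(e); // fall back to reporting the send failure instead
            }
        }

        void onFailure(Exception e) {
            try {
                channel.sendFailure(e);
            } catch (Exception inner) {
                // last resort: the channel is unusable, so log and give up
                System.err.println("failed to send error response for [" + actionName + "]");
            }
        }
    }

    public static void main(String[] args) {
        Channel stdout = new Channel() {
            @Override public void sendResponse(Object response) { System.out.println("response: " + response); }
            @Override public void sendFailure(Exception error) { System.out.println("failure: " + error.getMessage()); }
        };
        ChannelListener<String> listener = new ChannelListener<>(stdout, "sample-action");
        listener.onResponse("query result");
        listener.onFailure(new Exception("shard not available"));
    }
}
```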
--- .../action/search/SearchTransportService.java | 22 +------ .../support/HandledTransportAction.java | 58 ++++++++++++------- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index a4ea2616e0a21..9db297f4b9247 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; @@ -348,25 +349,8 @@ public void onFailure(Exception e) { transportService.registerRequestHandler(QUERY_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new, (request, channel, task) -> { - searchService.executeQueryPhase(request, (SearchTask) task, new ActionListener() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - try { - channel.sendResponse(searchPhaseResult); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - throw new UncheckedIOException(e1); - } - } - }); + searchService.executeQueryPhase(request, (SearchTask) task, new HandledTransportAction.ChannelActionListener<>( + channel, QUERY_ACTION_NAME, request)); }); TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME, (request) -> ((ShardSearchRequest)request).numberOfShards() == 1 ? 
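For illustration, a minimal sketch of the rule this change enforces, using hypothetical names rather than the actual transport code: work triggered on a network thread must be handed off to a worker pool instead of running inline on the caller:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

public class ForkToThreadPoolSketch {

    private static final ExecutorService GET_POOL = Executors.newFixedThreadPool(4);

    // Wrong: runs the (potentially blocking) read on the calling thread,
    // which in the regression was the network thread.
    static void shardOperationInline(String request, Consumer<String> listener) {
        listener.accept(doRead(request));
    }

    // Right: fork to a dedicated pool, mirroring what delegating to the
    // super method achieves in the fix.
    static void shardOperationForked(String request, Consumer<String> listener) {
        GET_POOL.execute(() -> listener.accept(doRead(request)));
    }

    private static String doRead(String request) {
        return "doc for " + request; // stand-in for a realtime get
    }

    public static void main(String[] args) {
        shardOperationForked("doc-1", System.out::println);
        GET_POOL.shutdown();
    }
}
```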
QueryFetchSearchResult::new : QuerySearchResult::new); diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index c55e0cff6f250..9de040a98b405 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.support; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -27,7 +29,9 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -63,30 +67,44 @@ protected HandledTransportAction(Settings settings, String actionName, boolean c } class TransportHandler implements TransportRequestHandler { - @Override public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { // We already got the task created on the network layer - no need to create it again on the transport layer - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } + Logger logger = HandledTransportAction.this.logger; + execute(task, request, new ChannelActionListener<>(channel, actionName, request)); + } + } + + public static final class ChannelActionListener implements + ActionListener { + private final Logger logger = LogManager.getLogger(getClass()); + private final TransportChannel channel; + private final Request request; + private final String actionName; + + public ChannelActionListener(TransportChannel channel, String actionName, Request request) { + this.channel = channel; + this.request = request; + this.actionName = actionName; + } - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn(() -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); - } - } - }); + @Override + public void onResponse(Response response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + } } } From 9026c3ee92f1f13630a989023acd8c47829e3ec0 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 18 Sep 2018 19:53:42 +0200 Subject: [PATCH 27/46] Ensure realtime `_get` and `_termvectors` don't run on the network thread (#33814) The change in #27500 introduces this regression that causes `_get` and `_term_vector` actions to run on the network thread if the realtime flag is set. This fixes the issue by delegating to the super method forking on the corresponding threadpool. 
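The essence of the fix can be sketched with a plain ExecutorService standing in for Elasticsearch's ThreadPool; all names here are illustrative, not the real API:

```
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

final class AsyncShardOperationSketch {
    // Stand-in for the threadpool the action forks onto (e.g. the "get" pool).
    private static final ExecutorService GET_POOL = Executors.newFixedThreadPool(4);

    // Before the fix (conceptually): the shard read ran inline, i.e. on whatever
    // thread delivered the request -- for realtime requests, the network thread.
    static void inlineOperation(String docId, Consumer<String> listener) {
        listener.accept(readFromShard(docId)); // blocks the calling thread
    }

    // After the fix (conceptually): the potentially blocking read is always forked.
    static void forkedOperation(String docId, Consumer<String> listener) {
        GET_POOL.submit(() -> listener.accept(readFromShard(docId)));
    }

    private static String readFromShard(String docId) {
        return "doc-for-" + docId; // placeholder for a possibly blocking realtime get
    }

    public static void main(String[] args) {
        forkedOperation("1", doc -> System.out.println(Thread.currentThread().getName() + " -> " + doc));
        GET_POOL.shutdown();
    }
}
```

Delegating to the super method achieves the forked variant, since the base implementation already dispatches to the corresponding executor.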
--- .../java/org/elasticsearch/action/get/TransportGetAction.java | 2 +- .../action/termvectors/TransportTermVectorsAction.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 0aeacb38ffa56..63d3d30e1e27f 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -81,7 +81,7 @@ protected void asyncShardOperation(GetRequest request, ShardId shardId, ActionLi IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); if (request.realtime()) { // we are not tied to a refresh cycle here anyway - listener.onResponse(shardOperation(request, shardId)); + super.asyncShardOperation(request, shardId, listener); } else { indexShard.awaitShardSearchActive(b -> { try { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index a259f5b828a05..d2a6055bbe75a 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -90,7 +90,7 @@ protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId, IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); if (request.realtime()) { // it's a realtime request which is not subject to refresh cycles - listener.onResponse(shardOperation(request, shardId)); + super.asyncShardOperation(request, shardId, listener); } else { indexShard.awaitShardSearchActive(b -> { try { From c6462057a15289bfb69a97441159b555b37d7d80 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 18 Sep 2018 20:43:31 +0200 Subject: [PATCH 28/46] MINOR: Remove Some Dead Code in Scripting (#33800) * The is default check method is not used in ScriptType * The removed vars on ExpressionSearchScript are unused --- .../expression/ExpressionSearchScript.java | 3 --- .../org/elasticsearch/script/ScriptType.java | 17 +++-------------- 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java index cb19a604623ab..6df2b33127de4 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java @@ -25,7 +25,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; -import org.apache.lucene.search.Scorer; import org.elasticsearch.script.GeneralScriptException; import org.elasticsearch.script.SearchScript; @@ -42,8 +41,6 @@ class ExpressionSearchScript implements SearchScript.LeafFactory { final DoubleValuesSource source; final ReplaceableConstDoubleValueSource specialValue; // _value final boolean needsScores; - Scorer scorer; - int docid; ExpressionSearchScript(Expression e, SimpleBindings b, 
ReplaceableConstDoubleValueSource v, boolean needsScores) { exprScript = e; diff --git a/server/src/main/java/org/elasticsearch/script/ScriptType.java b/server/src/main/java/org/elasticsearch/script/ScriptType.java index 2fdf283c57fb4..5d356bbd7cbcc 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptType.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptType.java @@ -41,7 +41,7 @@ public enum ScriptType implements Writeable { * (Groovy and others), but can be overridden by the specific {@link ScriptEngine} * if the language is naturally secure (Painless, Mustache, and Expressions). */ - INLINE ( 0 , new ParseField("source", "inline") , false ), + INLINE ( 0 , new ParseField("source", "inline")), /** * STORED scripts are saved as part of the {@link org.elasticsearch.cluster.ClusterState} @@ -50,7 +50,7 @@ public enum ScriptType implements Writeable { * (Groovy and others), but can be overridden by the specific {@link ScriptEngine} * if the language is naturally secure (Painless, Mustache, and Expressions). */ - STORED ( 1 , new ParseField("id", "stored") , false ); + STORED ( 1 , new ParseField("id", "stored")); /** * Reads an int from the input stream and converts it to a {@link ScriptType}. @@ -73,18 +73,15 @@ public static ScriptType readFrom(StreamInput in) throws IOException { private final int id; private final ParseField parseField; - private final boolean defaultEnabled; /** * Standard constructor. * @param id A unique identifier for a type that can be read/written to a stream. * @param parseField Specifies the name used to parse input from queries. - * @param defaultEnabled Whether or not a {@link ScriptType} can be run by default. */ - ScriptType(int id, ParseField parseField, boolean defaultEnabled) { + ScriptType(int id, ParseField parseField) { this.id = id; this.parseField = parseField; - this.defaultEnabled = defaultEnabled; } public void writeTo(StreamOutput out) throws IOException { @@ -112,14 +109,6 @@ public ParseField getParseField() { return parseField; } - /** - * @return Whether or not a {@link ScriptType} can be run by default. Note - * this can be potentially overridden by any {@link ScriptEngine}. - */ - public boolean isDefaultEnabled() { - return defaultEnabled; - } - /** * @return The same as calling {@link #getName()}. 
*/ From f4cbbcf98b8b0acfdabbc2a8dfee28bd3536bbca Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 18 Sep 2018 15:25:20 -0400 Subject: [PATCH 29/46] Add ES version 6.4.2 (#33831) Version and properties files --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 01738930b4bcf..e6939edbd89cf 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -101,6 +101,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_1_ID = 6040199; public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_2_ID = 6040299; + public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_7_0_0_alpha1_ID = 7000001; @@ -123,6 +125,8 @@ public static Version fromId(int id) { return V_7_0_0_alpha1; case V_6_5_0_ID: return V_6_5_0; + case V_6_4_2_ID: + return V_6_4_2; case V_6_4_1_ID: return V_6_4_1; case V_6_4_0_ID: From 805a12361fe30f940121627a8a9398e7d93cffef Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 18 Sep 2018 21:47:02 +0200 Subject: [PATCH 30/46] [CCR] Fail with a descriptive error if leader index does not exist (#33797) Closes #33737 --- .../org/elasticsearch/xpack/ccr/FollowIndexIT.java | 13 +++++++++++++ .../elasticsearch/xpack/ccr/CcrLicenseChecker.java | 6 ++++++ .../action/TransportCreateAndFollowIndexAction.java | 6 ++++++ .../org/elasticsearch/xpack/ccr/ShardChangesIT.java | 9 +++++++++ 4 files changed, 34 insertions(+) diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 73a15410b07ba..f108e0336915d 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -25,6 +25,7 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -82,6 +83,18 @@ public void testFollowIndex() throws Exception { } } + public void testFollowNonExistingLeaderIndex() throws Exception { + assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); + ResponseException e = expectThrows(ResponseException.class, + () -> followIndex("leader_cluster:non-existing-index", "non-existing-index")); + assertThat(e.getMessage(), containsString("no such index")); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + + e = expectThrows(ResponseException.class, () -> createAndFollowIndex("leader_cluster:non-existing-index", "non-existing-index")); + assertThat(e.getMessage(), containsString("no such index")); + 
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + public void testAutoFollowPatterns() throws Exception { assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index f597871fc66e6..065b3ffd4f51b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; @@ -109,6 +110,11 @@ public void checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUU onFailure, leaderClusterState -> { IndexMetaData leaderIndexMetaData = leaderClusterState.getMetaData().index(leaderIndex); + if (leaderIndexMetaData == null) { + onFailure.accept(new IndexNotFoundException(leaderIndex)); + return; + } + final Client leaderClient = client.getRemoteClusterClient(clusterAlias); fetchLeaderHistoryUUIDs(leaderClient, leaderIndexMetaData, onFailure, historyUUIDs -> { consumer.accept(historyUUIDs, leaderIndexMetaData); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java index e795a90372941..fd421a9380b36 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; @@ -121,6 +122,11 @@ private void createFollowerIndexAndFollowLocalIndex( // following an index in local cluster, so use local cluster state to fetch leader index metadata final String leaderIndex = request.getFollowRequest().getLeaderIndex(); final IndexMetaData leaderIndexMetadata = state.getMetaData().index(leaderIndex); + if (leaderIndexMetadata == null) { + listener.onFailure(new IndexNotFoundException(leaderIndex)); + return; + } + Consumer handler = historyUUIDs -> { createFollowerIndex(leaderIndexMetadata, historyUUIDs, request, listener); }; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java index 3d1789389d775..472098dd4fa4b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -430,12 +430,21 @@ public void testFollowNonExistentIndex() throws Exception { // Leader index does not exist. 
FollowIndexAction.Request followRequest1 = createFollowRequest("non-existent-leader", "test-follower"); expectThrows(IndexNotFoundException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest1).actionGet()); + expectThrows(IndexNotFoundException.class, + () -> client().execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest1)) + .actionGet()); // Follower index does not exist. FollowIndexAction.Request followRequest2 = createFollowRequest("non-test-leader", "non-existent-follower"); expectThrows(IndexNotFoundException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest2).actionGet()); + expectThrows(IndexNotFoundException.class, + () -> client().execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest2)) + .actionGet()); // Both indices do not exist. FollowIndexAction.Request followRequest3 = createFollowRequest("non-existent-leader", "non-existent-follower"); expectThrows(IndexNotFoundException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest3).actionGet()); + expectThrows(IndexNotFoundException.class, + () -> client().execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest3)) + .actionGet()); } public void testFollowIndex_lowMaxTranslogBytes() throws Exception { From 99513b306e84b22b83cefde4c07ed12f576ffc09 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 18 Sep 2018 13:11:46 -0700 Subject: [PATCH 31/46] Test: Relax jarhell gradle test (#33787) Gradle can sometimes emit mixed log lines due to how we spawn things in separate processes. This commit changes the jarhell integ test to only look for the exception and message, instead of including the outer part about the exception in thread main. 
closes #33774 --- .../java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java index 03f2022bc66e8..e5624a15d92df 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -34,7 +34,7 @@ public void testJarHellDetected() { assertTaskFailed(result, ":jarHell"); assertOutputContains( result.getOutput(), - "Exception in thread \"main\" java.lang.IllegalStateException: jar hell!", + "java.lang.IllegalStateException: jar hell!", "class: org.apache.logging.log4j.Logger" ); } From 0cf0d73813e7732234e2a89611f05b9faa165ad2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 19 Sep 2018 06:05:36 +0200 Subject: [PATCH 32/46] TESTS: Set SO_LINGER = 0 for MockNioTransport (#32560) * TESTS: Set SO_LINGER = 0 for MockNioTransport * Prevents lingering sockets in TIME_WAIT piling up during test runs and leading to port collisions that manifest as timeouts * Fixes #32552 --- .../main/java/org/elasticsearch/transport/MockTcpTransport.java | 1 + .../java/org/elasticsearch/transport/nio/MockNioTransport.java | 1 + 2 files changed, 2 insertions(+) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 996508bdb887a..99aa540b68411 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -182,6 +182,7 @@ protected MockChannel initiateChannel(DiscoveryNode node, ActionListener c executor.submit(() -> { try { socket.connect(address); + socket.setSoLinger(false, 0); channel.loopRead(executor); connectListener.onResponse(null); } catch (Exception ex) { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 19543cfdcbb15..89df9166431e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -193,6 +193,7 @@ public MockSocketChannel createChannel(NioSelector selector, SocketChannel chann BytesChannelContext context = new BytesChannelContext(nioChannel, selector, (e) -> exceptionCaught(nioChannel, e), readWriteHandler, new InboundChannelBuffer(pageSupplier)); nioChannel.setContext(context); + nioChannel.setSoLinger(0); return nioChannel; } From c6e3231ef38dda152c9fcc2b74519a9a1e33e0e9 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 19 Sep 2018 07:33:15 +0300 Subject: [PATCH 33/46] SQL: day and month name functions tests locale providers enforcement (#33653) DAYNAME and MONTHNAME functions tests will be skipped if the right JVM parameter (-Djava.locale.providers=COMPAT) is not used in unit testing environment --- .../datetime/NamedDateTimeProcessorTests.java | 35 +++++++++++++++---- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java index 828a16f5aa965..0f12ae05f86d4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java @@ -5,16 +5,20 @@ */ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; +import org.elasticsearch.bootstrap.JavaVersion; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import org.junit.Assume; import java.io.IOException; import java.util.TimeZone; public class NamedDateTimeProcessorTests extends AbstractWireSerializingTestCase { + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); public static NamedDateTimeProcessor randomNamedDateTimeProcessor() { @@ -37,21 +41,21 @@ protected NamedDateTimeProcessor mutateInstance(NamedDateTimeProcessor instance) return new NamedDateTimeProcessor(replaced, UTC); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") public void testValidDayNamesInUTC() { + assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, UTC); assertEquals("Thursday", proc.process("0")); assertEquals("Saturday", proc.process("-64164233612338")); assertEquals("Monday", proc.process("64164233612338")); - + assertEquals("Thursday", proc.process(new DateTime(0L, DateTimeZone.UTC))); assertEquals("Thursday", proc.process(new DateTime(-5400, 12, 25, 2, 0, DateTimeZone.UTC))); assertEquals("Friday", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); assertEquals("Tuesday", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") public void testValidDayNamesWithNonUTCTimeZone() { + assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, TimeZone.getTimeZone("GMT-10:00")); assertEquals("Wednesday", proc.process("0")); assertEquals("Friday", proc.process("-64164233612338")); @@ -64,9 +68,9 @@ public void testValidDayNamesWithNonUTCTimeZone() { assertEquals("Monday", proc.process(new DateTime(10902, 8, 22, 9, 59, DateTimeZone.UTC))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") public void testValidMonthNamesInUTC() { - NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, UTC); + assumeJava9PlusAndCompatLocaleProviderSetting(); + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, UTC); assertEquals("January", proc.process("0")); assertEquals("September", proc.process("-64164233612338")); assertEquals("April", proc.process("64164233612338")); @@ -77,8 +81,8 @@ public void testValidMonthNamesInUTC() { assertEquals("August", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") public void testValidMonthNamesWithNonUTCTimeZone() { + 
assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, TimeZone.getTimeZone("GMT-3:00")); assertEquals("December", proc.process("0")); assertEquals("August", proc.process("-64165813612338")); // GMT: Tuesday, September 1, -0064 2:53:07.662 AM @@ -90,4 +94,23 @@ public void testValidMonthNamesWithNonUTCTimeZone() { assertEquals("July", proc.process(new DateTime(10902, 8, 1, 2, 59, DateTimeZone.UTC))); assertEquals("August", proc.process(new DateTime(10902, 8, 1, 3, 00, DateTimeZone.UTC))); } + + /* + * This method checks the existence of a jvm parameter that should exist in ES jvm.options for Java 9+. If the parameter is + * missing, the tests will be skipped. Not doing this, the tests will fail because the day and month names will be in the narrow + * format (Mon, Tue, Jan, Feb etc) instead of full format (Monday, Tuesday, January, February etc). + * + * Related infra issue: https://github.com/elastic/elasticsearch/issues/33796 + */ + private void assumeJava9PlusAndCompatLocaleProviderSetting() { + // at least Java 9 + if (JavaVersion.current().compareTo(JavaVersion.parse("9")) < 0) { + return; + } + String beforeJava9CompatibleLocale = System.getProperty("java.locale.providers"); + // and COMPAT setting needs to be first on the list + boolean isBeforeJava9Compatible = beforeJava9CompatibleLocale != null + && Strings.tokenizeToStringArray(beforeJava9CompatibleLocale, ",")[0].equals("COMPAT"); + Assume.assumeTrue(isBeforeJava9Compatible); + } } From 013b64a07c0f1580352f6188b0dc300ce97c5057 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 19 Sep 2018 07:18:24 +0200 Subject: [PATCH 34/46] [CCR] Change FollowIndexAction.Request class to be more user friendly (#33810) Instead of having one constructor that accepts all arguments, all parameters should be provided via setters. Only leader and follower index are required arguments. This makes using this class in tests and transport client easier. 
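As a rough sketch of the resulting request shape (field names trimmed and illustrative; the real class carries more parameters plus serialization plumbing), optional values stay null on the request and the transport action substitutes defaults when it creates the shard-follow task:

```
// Trimmed-down sketch of the setter-based request (illustrative, not the real class).
final class FollowRequestSketch {
    private String leaderIndex;             // required
    private String followerIndex;           // required
    private Integer maxBatchOperationCount; // optional; null means "server default"

    void setLeaderIndex(String v) { this.leaderIndex = v; }
    void setFollowerIndex(String v) { this.followerIndex = v; }
    void setMaxBatchOperationCount(Integer v) { this.maxBatchOperationCount = v; }

    // Validation rejects only values that are present and out of range;
    // absent (null) values are filled in later on the server side.
    String validate() {
        if (leaderIndex == null) return "leader_index is missing";
        if (followerIndex == null) return "follower_index is missing";
        if (maxBatchOperationCount != null && maxBatchOperationCount < 1) {
            return "max_batch_operation_count must be larger than 0";
        }
        return null; // valid
    }

    // The transport action resolves the effective value, mirroring createShardFollowTask below.
    int effectiveMaxBatchOperationCount(int serverDefault) {
        return maxBatchOperationCount != null ? maxBatchOperationCount : serverDefault;
    }
}
```

Callers now set only what they care about, e.g. leader and follower index plus a retry delay, and everything else falls back to the defaults defined in TransportFollowIndexAction.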
--- .../xpack/ccr/FollowIndexSecurityIT.java | 4 +- .../xpack/ccr/FollowIndexIT.java | 4 +- .../ccr/action/AutoFollowCoordinator.java | 16 +- .../xpack/ccr/action/ShardChangesAction.java | 5 +- .../action/TransportFollowIndexAction.java | 87 ++++++- .../elasticsearch/xpack/ccr/CcrLicenseIT.java | 16 +- .../xpack/ccr/ShardChangesIT.java | 45 ++-- .../ccr/action/FollowIndexRequestTests.java | 39 ++- .../ShardFollowNodeTaskRandomTests.java | 3 +- .../core/ccr/action/FollowIndexAction.java | 238 +++++++++--------- 10 files changed, 267 insertions(+), 190 deletions(-) diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index a49ddd1dbef9e..85913c2611455 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -199,13 +199,13 @@ private static void refresh(String index) throws IOException { private static void followIndex(String leaderIndex, String followIndex) throws IOException { final Request request = new Request("POST", "/" + followIndex + "/_ccr/follow"); - request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"poll_timeout\": \"10ms\"}"); assertOK(client().performRequest(request)); } private static void createAndFollowIndex(String leaderIndex, String followIndex) throws IOException { final Request request = new Request("POST", "/" + followIndex + "/_ccr/create_and_follow"); - request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"poll_timeout\": \"10ms\"}"); assertOK(client().performRequest(request)); } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index f108e0336915d..29184c117bf40 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -141,13 +141,13 @@ private static void refresh(String index) throws IOException { private static void followIndex(String leaderIndex, String followIndex) throws IOException { final Request request = new Request("POST", "/" + followIndex + "/_ccr/follow"); - request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"poll_timeout\": \"10ms\"}"); assertOK(client().performRequest(request)); } private static void createAndFollowIndex(String leaderIndex, String followIndex) throws IOException { final Request request = new Request("POST", "/" + followIndex + "/_ccr/create_and_follow"); - request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"poll_timeout\": \"10ms\"}"); assertOK(client().performRequest(request)); } diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 3a524e5724980..46679d22520c3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -297,12 +297,16 @@ private void followLeaderIndex(String clusterAlias, Index indexToFollow, String leaderIndexNameWithClusterAliasPrefix = clusterAlias.equals("_local_") ? leaderIndexName : clusterAlias + ":" + leaderIndexName; - FollowIndexAction.Request request = - new FollowIndexAction.Request(leaderIndexNameWithClusterAliasPrefix, followIndexName, - pattern.getMaxBatchOperationCount(), pattern.getMaxConcurrentReadBatches(), - pattern.getMaxOperationSizeInBytes(), pattern.getMaxConcurrentWriteBatches(), - pattern.getMaxWriteBufferSize(), pattern.getMaxRetryDelay(), - pattern.getIdleShardRetryDelay()); + FollowIndexAction.Request request = new FollowIndexAction.Request(); + request.setLeaderIndex(leaderIndexNameWithClusterAliasPrefix); + request.setFollowerIndex(followIndexName); + request.setMaxBatchOperationCount(pattern.getMaxBatchOperationCount()); + request.setMaxConcurrentReadBatches(pattern.getMaxConcurrentReadBatches()); + request.setMaxOperationSizeInBytes(pattern.getMaxOperationSizeInBytes()); + request.setMaxConcurrentWriteBatches(pattern.getMaxConcurrentWriteBatches()); + request.setMaxWriteBufferSize(pattern.getMaxWriteBufferSize()); + request.setMaxRetryDelay(pattern.getMaxRetryDelay()); + request.setPollTimeout(pattern.getIdleShardRetryDelay()); // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index bf2bbd5af8a5c..937ca0a009613 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.io.IOException; import java.util.ArrayList; @@ -64,8 +63,8 @@ public static class Request extends SingleShardRequest { private int maxOperationCount; private ShardId shardId; private String expectedHistoryUUID; - private TimeValue pollTimeout = FollowIndexAction.DEFAULT_POLL_TIMEOUT; - private long maxOperationSizeInBytes = FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + private TimeValue pollTimeout = TransportFollowIndexAction.DEFAULT_POLL_TIMEOUT; + private long maxOperationSizeInBytes = TransportFollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; public Request(ShardId shardId, String expectedHistoryUUID) { super(shardId.getIndexName()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java index eccda262636d2..e9ee38fd1f9e2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingSlowLog; @@ -55,6 +56,14 @@ public class TransportFollowIndexAction extends HandledTransportAction { + static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; + private static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500); + private static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; + private static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; + private static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; + private static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; + static final TimeValue DEFAULT_POLL_TIMEOUT = TimeValue.timeValueMinutes(1); + private final Client client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -179,19 +188,8 @@ void start( String[] recordedLeaderShardHistoryUUIDs = extractIndexShardHistoryUUIDs(ccrIndexMetadata); String recordedLeaderShardHistoryUUID = recordedLeaderShardHistoryUUIDs[shardId]; - ShardFollowTask shardFollowTask = new ShardFollowTask( - clusterNameAlias, - new ShardId(followIndexMetadata.getIndex(), shardId), - new ShardId(leaderIndexMetadata.getIndex(), shardId), - request.getMaxBatchOperationCount(), - request.getMaxConcurrentReadBatches(), - request.getMaxOperationSizeInBytes(), - request.getMaxConcurrentWriteBatches(), - request.getMaxWriteBufferSize(), - request.getMaxRetryDelay(), - request.getPollTimeout(), - recordedLeaderShardHistoryUUID, - filteredHeaders); + final ShardFollowTask shardFollowTask = createShardFollowTask(shardId, clusterNameAlias, request, + leaderIndexMetadata, followIndexMetadata, recordedLeaderShardHistoryUUID, filteredHeaders); persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, new ActionListener>() { @Override @@ -299,6 +297,69 @@ static void validate( followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); } + private static ShardFollowTask createShardFollowTask( + int shardId, + String clusterAliasName, + FollowIndexAction.Request request, + IndexMetaData leaderIndexMetadata, + IndexMetaData followIndexMetadata, + String recordedLeaderShardHistoryUUID, + Map filteredHeaders + ) { + int maxBatchOperationCount; + if (request.getMaxBatchOperationCount() != null) { + maxBatchOperationCount = request.getMaxBatchOperationCount(); + } else { + maxBatchOperationCount = DEFAULT_MAX_BATCH_OPERATION_COUNT; + } + + int maxConcurrentReadBatches; + if (request.getMaxConcurrentReadBatches() != null){ + maxConcurrentReadBatches = request.getMaxConcurrentReadBatches(); + } else { + maxConcurrentReadBatches = DEFAULT_MAX_CONCURRENT_READ_BATCHES; + } + + long maxOperationSizeInBytes; + if (request.getMaxOperationSizeInBytes() != null) { + maxOperationSizeInBytes = request.getMaxOperationSizeInBytes(); + } else { + maxOperationSizeInBytes = DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + } + + int maxConcurrentWriteBatches; + if (request.getMaxConcurrentWriteBatches() != null) { + maxConcurrentWriteBatches = request.getMaxConcurrentWriteBatches(); + } else { + maxConcurrentWriteBatches = DEFAULT_MAX_CONCURRENT_WRITE_BATCHES; + } + + int 
maxWriteBufferSize; + if (request.getMaxWriteBufferSize() != null) { + maxWriteBufferSize = request.getMaxWriteBufferSize(); + } else { + maxWriteBufferSize = DEFAULT_MAX_WRITE_BUFFER_SIZE; + } + + TimeValue maxRetryDelay = request.getMaxRetryDelay() == null ? DEFAULT_MAX_RETRY_DELAY : request.getMaxRetryDelay(); + TimeValue pollTimeout = request.getPollTimeout() == null ? DEFAULT_POLL_TIMEOUT : request.getPollTimeout(); + + return new ShardFollowTask( + clusterAliasName, + new ShardId(followIndexMetadata.getIndex(), shardId), + new ShardId(leaderIndexMetadata.getIndex(), shardId), + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + maxRetryDelay, + pollTimeout, + recordedLeaderShardHistoryUUID, + filteredHeaders + ); + } + private static String[] extractIndexShardHistoryUUIDs(Map ccrIndexMetaData) { String historyUUIDs = ccrIndexMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS); return historyUUIDs.split(","); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index 1e7e3fe42df27..a74b1e33cd26e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -192,16 +192,12 @@ private void assertNonCompliantLicense(final Exception e) { } private FollowIndexAction.Request getFollowRequest() { - return new FollowIndexAction.Request( - "leader", - "follower", - FollowIndexAction.DEFAULT_MAX_BATCH_OPERATION_COUNT, - FollowIndexAction.DEFAULT_MAX_CONCURRENT_READ_BATCHES, - FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, - FollowIndexAction.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, - FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, - TimeValue.timeValueMillis(10), - TimeValue.timeValueMillis(10)); + FollowIndexAction.Request request = new FollowIndexAction.Request(); + request.setLeaderIndex("leader"); + request.setFollowerIndex("follower"); + request.setMaxRetryDelay(TimeValue.timeValueMillis(10)); + request.setPollTimeout(TimeValue.timeValueMillis(10)); + return request; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java index 472098dd4fa4b..78715654a05e3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -319,9 +319,11 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) long numDocsIndexed = Math.min(3000 * 2, randomLongBetween(maxReadSize, maxReadSize * 10)); atLeastDocsIndexed("index1", numDocsIndexed / 3); - final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", maxReadSize, - randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), - randomIntBetween(1024, 10240), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); + followRequest.setMaxBatchOperationCount(maxReadSize); + followRequest.setMaxConcurrentReadBatches(randomIntBetween(2, 10)); + followRequest.setMaxConcurrentWriteBatches(randomIntBetween(2, 10)); + followRequest.setMaxWriteBufferSize(randomIntBetween(1024, 10240)); CreateAndFollowIndexAction.Request 
createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); @@ -358,9 +360,10 @@ public void testFollowIndexAndCloseNode() throws Exception { }); thread.start(); - final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", randomIntBetween(32, 2048), - randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), - FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); + followRequest.setMaxBatchOperationCount(randomIntBetween(32, 2048)); + followRequest.setMaxConcurrentReadBatches(randomIntBetween(2, 10)); + followRequest.setMaxConcurrentWriteBatches(randomIntBetween(2, 10)); client().execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest)).get(); long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getMaxBatchOperationCount(), @@ -447,7 +450,7 @@ public void testFollowNonExistentIndex() throws Exception { .actionGet()); } - public void testFollowIndex_lowMaxTranslogBytes() throws Exception { + public void testFollowIndexMaxOperationSizeInBytes() throws Exception { final String leaderIndexSettings = getIndexSettings(1, between(0, 1), singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); @@ -460,8 +463,8 @@ public void testFollowIndex_lowMaxTranslogBytes() throws Exception { client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); } - final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", 1024, 1, 1024L, - 1, 10240, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); + followRequest.setMaxOperationSizeInBytes(1L); final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); @@ -489,25 +492,21 @@ public void testDontFollowTheWrongIndex() throws Exception { assertAcked(client().admin().indices().prepareCreate("index3").setSource(leaderIndexSettings, XContentType.JSON)); ensureGreen("index3"); - FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", 1024, 1, 1024L, - 1, 10240, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); - followRequest = new FollowIndexAction.Request("index3", "index4", 1024, 1, 1024L, - 1, 10240, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + followRequest = createFollowRequest("index3", "index4"); createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); unfollowIndex("index2", "index4"); - FollowIndexAction.Request wrongRequest1 = new FollowIndexAction.Request("index1", "index4", 1024, 1, 1024L, - 1, 10240, 
TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.Request wrongRequest1 = createFollowRequest("index1", "index4"); Exception e = expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, wrongRequest1).actionGet()); assertThat(e.getMessage(), containsString("follow index [index4] should reference")); - FollowIndexAction.Request wrongRequest2 = new FollowIndexAction.Request("index3", "index2", 1024, 1, 1024L, - 1, 10240, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.Request wrongRequest2 = createFollowRequest("index3", "index2"); e = expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, wrongRequest2).actionGet()); assertThat(e.getMessage(), containsString("follow index [index2] should reference")); } @@ -716,10 +715,12 @@ private void assertSameDocCount(String index1, String index2) throws Exception { }, 60, TimeUnit.SECONDS); } - public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followIndex) { - return new FollowIndexAction.Request(leaderIndex, followIndex, FollowIndexAction.DEFAULT_MAX_BATCH_OPERATION_COUNT, - FollowIndexAction.DEFAULT_MAX_CONCURRENT_READ_BATCHES, FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, - FollowIndexAction.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, - TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); + public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followerIndex) { + FollowIndexAction.Request request = new FollowIndexAction.Request(); + request.setLeaderIndex(leaderIndex); + request.setFollowerIndex(followerIndex); + request.setMaxRetryDelay(TimeValue.timeValueMillis(10)); + request.setPollTimeout(TimeValue.timeValueMillis(10)); + return request; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java index e5f7e693a7f1c..2bff73d223b57 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java @@ -40,24 +40,49 @@ protected boolean supportsUnknownFields() { } static FollowIndexAction.Request createTestRequest() { - return new FollowIndexAction.Request(randomAlphaOfLength(4), randomAlphaOfLength(4), randomIntBetween(1, Integer.MAX_VALUE), - randomIntBetween(1, Integer.MAX_VALUE), randomNonNegativeLong(), randomIntBetween(1, Integer.MAX_VALUE), - randomIntBetween(1, Integer.MAX_VALUE), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); + FollowIndexAction.Request request = new FollowIndexAction.Request(); + request.setLeaderIndex(randomAlphaOfLength(4)); + request.setFollowerIndex(randomAlphaOfLength(4)); + if (randomBoolean()) { + request.setMaxBatchOperationCount(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxConcurrentReadBatches(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxConcurrentWriteBatches(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxOperationSizeInBytes(randomNonNegativeLong()); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + 
request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); + } + if (randomBoolean()) { + request.setPollTimeout(TimeValue.timeValueMillis(500)); + } + return request; } public void testValidate() { - FollowIndexAction.Request request = new FollowIndexAction.Request("index1", "index2", null, null, null, null, - null, TimeValue.ZERO, null); + FollowIndexAction.Request request = new FollowIndexAction.Request(); + request.setLeaderIndex("index1"); + request.setFollowerIndex("index2"); + request.setMaxRetryDelay(TimeValue.ZERO); + ActionRequestValidationException validationException = request.validate(); assertThat(validationException, notNullValue()); assertThat(validationException.getMessage(), containsString("[max_retry_delay] must be positive but was [0ms]")); - request = new FollowIndexAction.Request("index1", "index2", null, null, null, null, null, TimeValue.timeValueMinutes(10), null); + request.setMaxRetryDelay(TimeValue.timeValueMinutes(10)); validationException = request.validate(); assertThat(validationException, notNullValue()); assertThat(validationException.getMessage(), containsString("[max_retry_delay] must be less than [5m] but was [10m]")); - request = new FollowIndexAction.Request("index1", "index2", null, null, null, null, null, TimeValue.timeValueMinutes(1), null); + request.setMaxRetryDelay(TimeValue.timeValueMinutes(1)); validationException = request.validate(); assertThat(validationException, nullValue()); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index b0181de812a38..e7d0987223bb9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; -import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.nio.charset.StandardCharsets; @@ -81,7 +80,7 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR new ShardId("leader_index", "", 0), testRun.maxOperationCount, concurrency, - FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + TransportFollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, concurrency, 10240, TimeValue.timeValueMillis(10), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java index 65136b41a29e0..c90ef63862b9a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -30,14 +29,7 @@ public final class 
FollowIndexAction extends Action { public static final FollowIndexAction INSTANCE = new FollowIndexAction(); public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; - public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; - public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; - public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; - public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; - public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; - static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500); - static final TimeValue MAX_RETRY_DELAY = TimeValue.timeValueMinutes(5); - public static final TimeValue DEFAULT_POLL_TIMEOUT = TimeValue.timeValueMinutes(1); + public static final TimeValue MAX_RETRY_DELAY = TimeValue.timeValueMinutes(5); private FollowIndexAction() { super(NAME); @@ -59,30 +51,23 @@ public static class Request extends ActionRequest implements ToXContentObject { private static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); private static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); private static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, - (args, followerIndex) -> { - if (args[1] != null) { - followerIndex = (String) args[1]; - } - return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], - (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); - }); + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); static { - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_READ_BATCHES); - PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_SIZE_IN_BYTES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareString(Request::setLeaderIndex, LEADER_INDEX_FIELD); + PARSER.declareString(Request::setFollowerIndex, FOLLOWER_INDEX_FIELD); + PARSER.declareInt(Request::setMaxBatchOperationCount, MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(Request::setMaxConcurrentReadBatches, MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(Request::setMaxOperationSizeInBytes, MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(Request::setMaxConcurrentWriteBatches, MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(Request::setMaxWriteBufferSize, MAX_WRITE_BUFFER_SIZE); PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), + Request::setMaxRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY_FIELD.getPreferredName()), MAX_RETRY_DELAY_FIELD, ObjectParser.ValueType.STRING); PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), + Request::setPollTimeout, (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), POLL_TIMEOUT, ObjectParser.ValueType.STRING); @@ -108,6 +93,9 @@ public String getLeaderIndex() { return leaderIndex; } + public void 
setLeaderIndex(String leaderIndex) { + this.leaderIndex = leaderIndex; + } private String followerIndex; @@ -115,38 +103,66 @@ public String getFollowerIndex() { return followerIndex; } - private int maxBatchOperationCount; + public void setFollowerIndex(String followerIndex) { + this.followerIndex = followerIndex; + } + + private Integer maxBatchOperationCount; - public int getMaxBatchOperationCount() { + public Integer getMaxBatchOperationCount() { return maxBatchOperationCount; } - private int maxConcurrentReadBatches; + public void setMaxBatchOperationCount(Integer maxBatchOperationCount) { + this.maxBatchOperationCount = maxBatchOperationCount; + } - public int getMaxConcurrentReadBatches() { + private Integer maxConcurrentReadBatches; + + public Integer getMaxConcurrentReadBatches() { return maxConcurrentReadBatches; } - private long maxOperationSizeInBytes; + public void setMaxConcurrentReadBatches(Integer maxConcurrentReadBatches) { + this.maxConcurrentReadBatches = maxConcurrentReadBatches; + } + + private Long maxOperationSizeInBytes; - public long getMaxOperationSizeInBytes() { + public Long getMaxOperationSizeInBytes() { return maxOperationSizeInBytes; } - private int maxConcurrentWriteBatches; + public void setMaxOperationSizeInBytes(Long maxOperationSizeInBytes) { + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + } + + private Integer maxConcurrentWriteBatches; - public int getMaxConcurrentWriteBatches() { + public Integer getMaxConcurrentWriteBatches() { return maxConcurrentWriteBatches; } - private int maxWriteBufferSize; + public void setMaxConcurrentWriteBatches(Integer maxConcurrentWriteBatches) { + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + } + + private Integer maxWriteBufferSize; - public int getMaxWriteBufferSize() { + public Integer getMaxWriteBufferSize() { return maxWriteBufferSize; } + public void setMaxWriteBufferSize(Integer maxWriteBufferSize) { + this.maxWriteBufferSize = maxWriteBufferSize; + } + private TimeValue maxRetryDelay; + public void setMaxRetryDelay(TimeValue maxRetryDelay) { + this.maxRetryDelay = maxRetryDelay; + } + public TimeValue getMaxRetryDelay() { return maxRetryDelay; } @@ -157,88 +173,50 @@ public TimeValue getPollTimeout() { return pollTimeout; } - public Request( - final String leaderIndex, - final String followerIndex, - final Integer maxBatchOperationCount, - final Integer maxConcurrentReadBatches, - final Long maxOperationSizeInBytes, - final Integer maxConcurrentWriteBatches, - final Integer maxWriteBufferSize, - final TimeValue maxRetryDelay, - final TimeValue pollTimeout) { + public void setPollTimeout(TimeValue pollTimeout) { + this.pollTimeout = pollTimeout; + } + + public Request() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException e = null; if (leaderIndex == null) { - throw new IllegalArgumentException(LEADER_INDEX_FIELD.getPreferredName() + " is missing"); + e = addValidationError(LEADER_INDEX_FIELD.getPreferredName() + " is missing", e); } - if (followerIndex == null) { - throw new IllegalArgumentException(FOLLOWER_INDEX_FIELD.getPreferredName() + " is missing"); + e = addValidationError(FOLLOWER_INDEX_FIELD.getPreferredName() + " is missing", e); } - - final int actualMaxBatchOperationCount = - maxBatchOperationCount == null ? 
DEFAULT_MAX_BATCH_OPERATION_COUNT : maxBatchOperationCount; - if (actualMaxBatchOperationCount < 1) { - throw new IllegalArgumentException(MAX_BATCH_OPERATION_COUNT.getPreferredName() + " must be larger than 0"); + if (maxBatchOperationCount != null && maxBatchOperationCount < 1) { + e = addValidationError(MAX_BATCH_OPERATION_COUNT.getPreferredName() + " must be larger than 0", e); } - - final int actualMaxConcurrentReadBatches = - maxConcurrentReadBatches == null ? DEFAULT_MAX_CONCURRENT_READ_BATCHES : maxConcurrentReadBatches; - if (actualMaxConcurrentReadBatches < 1) { - throw new IllegalArgumentException(MAX_CONCURRENT_READ_BATCHES.getPreferredName() + " must be larger than 0"); + if (maxConcurrentReadBatches != null && maxConcurrentReadBatches < 1) { + e = addValidationError(MAX_CONCURRENT_READ_BATCHES.getPreferredName() + " must be larger than 0", e); } - - final long actualMaxOperationSizeInBytes = - maxOperationSizeInBytes == null ? DEFAULT_MAX_BATCH_SIZE_IN_BYTES : maxOperationSizeInBytes; - if (actualMaxOperationSizeInBytes <= 0) { - throw new IllegalArgumentException(MAX_BATCH_SIZE_IN_BYTES.getPreferredName() + " must be larger than 0"); + if (maxOperationSizeInBytes != null && maxOperationSizeInBytes <= 0) { + e = addValidationError(MAX_BATCH_SIZE_IN_BYTES.getPreferredName() + " must be larger than 0", e); } - - final int actualMaxConcurrentWriteBatches = - maxConcurrentWriteBatches == null ? DEFAULT_MAX_CONCURRENT_WRITE_BATCHES : maxConcurrentWriteBatches; - if (actualMaxConcurrentWriteBatches < 1) { - throw new IllegalArgumentException(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName() + " must be larger than 0"); + if (maxConcurrentWriteBatches != null && maxConcurrentWriteBatches < 1) { + e = addValidationError(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName() + " must be larger than 0", e); } - - final int actualMaxWriteBufferSize = maxWriteBufferSize == null ? DEFAULT_MAX_WRITE_BUFFER_SIZE : maxWriteBufferSize; - if (actualMaxWriteBufferSize < 1) { - throw new IllegalArgumentException(MAX_WRITE_BUFFER_SIZE.getPreferredName() + " must be larger than 0"); + if (maxWriteBufferSize != null && maxWriteBufferSize < 1) { + e = addValidationError(MAX_WRITE_BUFFER_SIZE.getPreferredName() + " must be larger than 0", e); } - - final TimeValue actualRetryTimeout = maxRetryDelay == null ? DEFAULT_MAX_RETRY_DELAY : maxRetryDelay; - final TimeValue actualPollTimeout = pollTimeout == null ? 
DEFAULT_POLL_TIMEOUT : pollTimeout; - - this.leaderIndex = leaderIndex; - this.followerIndex = followerIndex; - this.maxBatchOperationCount = actualMaxBatchOperationCount; - this.maxConcurrentReadBatches = actualMaxConcurrentReadBatches; - this.maxOperationSizeInBytes = actualMaxOperationSizeInBytes; - this.maxConcurrentWriteBatches = actualMaxConcurrentWriteBatches; - this.maxWriteBufferSize = actualMaxWriteBufferSize; - this.maxRetryDelay = actualRetryTimeout; - this.pollTimeout = actualPollTimeout; - } - - public Request() { - - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - - if (maxRetryDelay.millis() <= 0) { + if (maxRetryDelay != null && maxRetryDelay.millis() <= 0) { String message = "[" + MAX_RETRY_DELAY_FIELD.getPreferredName() + "] must be positive but was [" + maxRetryDelay.getStringRep() + "]"; - validationException = addValidationError(message, validationException); + e = addValidationError(message, e); } - if (maxRetryDelay.millis() > FollowIndexAction.MAX_RETRY_DELAY.millis()) { + if (maxRetryDelay != null && maxRetryDelay.millis() > FollowIndexAction.MAX_RETRY_DELAY.millis()) { String message = "[" + MAX_RETRY_DELAY_FIELD.getPreferredName() + "] must be less than [" + MAX_RETRY_DELAY + "] but was [" + maxRetryDelay.getStringRep() + "]"; - validationException = addValidationError(message, validationException); + e = addValidationError(message, e); } - return validationException; + return e; } @Override @@ -246,11 +224,11 @@ public void readFrom(final StreamInput in) throws IOException { super.readFrom(in); leaderIndex = in.readString(); followerIndex = in.readString(); - maxBatchOperationCount = in.readVInt(); - maxConcurrentReadBatches = in.readVInt(); - maxOperationSizeInBytes = in.readVLong(); - maxConcurrentWriteBatches = in.readVInt(); - maxWriteBufferSize = in.readVInt(); + maxBatchOperationCount = in.readOptionalVInt(); + maxConcurrentReadBatches = in.readOptionalVInt(); + maxOperationSizeInBytes = in.readOptionalLong(); + maxConcurrentWriteBatches = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalVInt(); maxRetryDelay = in.readOptionalTimeValue(); pollTimeout = in.readOptionalTimeValue(); } @@ -260,11 +238,11 @@ public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); out.writeString(leaderIndex); out.writeString(followerIndex); - out.writeVInt(maxBatchOperationCount); - out.writeVInt(maxConcurrentReadBatches); - out.writeVLong(maxOperationSizeInBytes); - out.writeVInt(maxConcurrentWriteBatches); - out.writeVInt(maxWriteBufferSize); + out.writeOptionalVInt(maxBatchOperationCount); + out.writeOptionalVInt(maxConcurrentReadBatches); + out.writeOptionalLong(maxOperationSizeInBytes); + out.writeOptionalVInt(maxConcurrentWriteBatches); + out.writeOptionalVInt(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); out.writeOptionalTimeValue(pollTimeout); } @@ -275,13 +253,27 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa { builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); - builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); - builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); - builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); - 
builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); - builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), maxRetryDelay.getStringRep()); - builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); + if (maxBatchOperationCount != null) { + builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + } + if (maxOperationSizeInBytes != null) { + builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + } + if (maxWriteBufferSize != null) { + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + } + if (maxConcurrentReadBatches != null) { + builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + } + if (maxConcurrentWriteBatches != null) { + builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + } + if (maxRetryDelay != null) { + builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), maxRetryDelay.getStringRep()); + } + if (pollTimeout != null) { + builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); + } } builder.endObject(); return builder; @@ -292,11 +284,11 @@ public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return maxBatchOperationCount == request.maxBatchOperationCount && - maxConcurrentReadBatches == request.maxConcurrentReadBatches && - maxOperationSizeInBytes == request.maxOperationSizeInBytes && - maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && - maxWriteBufferSize == request.maxWriteBufferSize && + return Objects.equals(maxBatchOperationCount, request.maxBatchOperationCount) && + Objects.equals(maxConcurrentReadBatches, request.maxConcurrentReadBatches) && + Objects.equals(maxOperationSizeInBytes, request.maxOperationSizeInBytes) && + Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && + Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && Objects.equals(maxRetryDelay, request.maxRetryDelay) && Objects.equals(pollTimeout, request.pollTimeout) && Objects.equals(leaderIndex, request.leaderIndex) && From 7f473b683d4f7426d3aa13cb618b6330a09ba309 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 18 Sep 2018 23:32:16 -0700 Subject: [PATCH 35/46] =?UTF-8?q?Profiler:=20Don=E2=80=99t=20profile=20NEX?= =?UTF-8?q?TDOC=20for=20ConstantScoreQuery.=20(#33196)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Profiler: Don’t profile NEXTDOC for ConstantScoreQuery. A ConstantScore query will return the iterator of its inner query. However, when profiling, the constant score query is wrapped separately from its inner query, which distorts the times emitted by the profiler. Return the iterator directly in such a case. 
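
For illustration, a minimal profiling setup in the spirit of the tests added
below (`searcher` is assumed to be a profiling-aware ContextIndexSearcher set
up as in QueryProfilerTests; this is a sketch, not part of the change):

```
// Sketch only: profile a ConstantScoreQuery wrapping a TermQuery. Before this
// change, NEXT_DOC was timed once in the ConstantScoreQuery's wrapper scorer
// and again in the inner TermQuery's wrapper, distorting the reported times.
Query query = new ConstantScoreQuery(new TermQuery(new Term("foo", "bar")));
QueryProfiler profiler = new QueryProfiler();
searcher.setProfiler(profiler);
searcher.search(query, 1);
ProfileResult constantScore = profiler.getTree().get(0);
Map<String, Long> outer = constantScore.getTimeBreakdown();
Map<String, Long> inner = constantScore.getProfiledChildren().get(0).getTimeBreakdown();
// With this change the wrapper shares the inner scorer's timers, so the two
// NEXT_DOC timings are equal instead of being counted twice.
```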
Closes #23430 --- .../profile/AbstractProfileBreakdown.java | 4 + .../search/profile/query/ProfileScorer.java | 31 +++ .../profile/query/QueryProfilerTests.java | 230 ++++++++++++++++++ 3 files changed, 265 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java b/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java index f49ad4a8718a0..654c67af444ea 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java +++ b/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java @@ -49,6 +49,10 @@ public Timer getTimer(T timing) { return timings[timing.ordinal()]; } + public void setTimer(T timing, Timer timer) { + timings[timing.ordinal()] = timer; + } + /** Convert this record to a map from timingType to times. */ public Map toTimingMap() { Map map = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java index 7899750461e52..ab8fb5dbcae02 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.profile.query; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -36,7 +38,10 @@ final class ProfileScorer extends Scorer { private final Scorer scorer; private ProfileWeight profileWeight; + private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer; + private final boolean isConstantScoreQuery; + ProfileScorer(ProfileWeight w, Scorer scorer, QueryProfileBreakdown profile) throws IOException { super(w); @@ -48,6 +53,26 @@ final class ProfileScorer extends Scorer { matchTimer = profile.getTimer(QueryTimingType.MATCH); shallowAdvanceTimer = profile.getTimer(QueryTimingType.SHALLOW_ADVANCE); computeMaxScoreTimer = profile.getTimer(QueryTimingType.COMPUTE_MAX_SCORE); + ProfileScorer profileScorer = null; + if (w.getQuery() instanceof ConstantScoreQuery && scorer instanceof ProfileScorer) { + //Case when we have a totalHits query and it is not cached + profileScorer = (ProfileScorer) scorer; + } else if (w.getQuery() instanceof ConstantScoreQuery && scorer.getChildren().size() == 1) { + //Case when we have a top N query. 
If the scorer has no children, it is because it is cached + //and in that case we do not do any special treatment + Scorable childScorer = scorer.getChildren().iterator().next().child; + if (childScorer instanceof ProfileScorer) { + profileScorer = (ProfileScorer) childScorer; + } + } + if (profileScorer != null) { + isConstantScoreQuery = true; + profile.setTimer(QueryTimingType.NEXT_DOC, profileScorer.nextDocTimer); + profile.setTimer(QueryTimingType.ADVANCE, profileScorer.advanceTimer); + profile.setTimer(QueryTimingType.MATCH, profileScorer.matchTimer); + } else { + isConstantScoreQuery = false; + } } @Override @@ -77,6 +102,9 @@ public Collection getChildren() throws IOException { @Override public DocIdSetIterator iterator() { + if (isConstantScoreQuery) { + return scorer.iterator(); + } final DocIdSetIterator in = scorer.iterator(); return new DocIdSetIterator() { @@ -114,6 +142,9 @@ public long cost() { @Override public TwoPhaseIterator twoPhaseIterator() { + if (isConstantScoreQuery) { + return scorer.twoPhaseIterator(); + } final TwoPhaseIterator in = scorer.twoPhaseIterator(); if (in == null) { return null; diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java index fd924ce07ca93..ba58a79953be1 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java @@ -28,10 +28,12 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.RandomApproximationQuery; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -118,6 +120,209 @@ public void testBasic() throws IOException { assertThat(rewriteTime, greaterThan(0L)); } + public void testConstantScoreQuery() throws IOException { + QueryProfiler profiler = new QueryProfiler(); + searcher.setProfiler(profiler); + Query query = new ConstantScoreQuery(new TermQuery(new Term("foo", "bar"))); + searcher.search(query, 1); + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdownConstantScoreQuery = results.get(0).getTimeBreakdown(); + assertEquals(1, results.get(0).getProfiledChildren().size()); + Map breakdownTermQuery = results.get(0).getProfiledChildren().get(0).getTimeBreakdown(); + + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + 
"_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + assertEquals(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), + breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue()); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testConstantScoreTotalHitsBeingCachedQuery() throws IOException { + Query query = new ConstantScoreQuery(new TermQuery(new Term("foo", "bar"))); + //clean cache and make sure queries will be cached + searcher.setQueryCache(IndexSearcher.getDefaultQueryCache()); + searcher.setQueryCachingPolicy(ALWAYS_CACHE_POLICY); + + QueryProfiler profiler = new QueryProfiler(); + searcher.setProfiler(profiler); + TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.search(query, collector); + + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdownConstantScoreQuery = results.get(0).getTimeBreakdown(); + assertEquals(1, results.get(0).getProfiledChildren().size()); + Map breakdownTermQuery = results.get(0).getProfiledChildren().get(0).getTimeBreakdown(); + //In this case scorers for constant score query and term query are disconnected. 
+ assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testConstantScoreTotalHitsNotCachedQuery() throws IOException { + Query query = new ConstantScoreQuery(new TermQuery(new Term("foo", "bar"))); + + //clean cache and make sure queries will not be cached + searcher.setQueryCache(IndexSearcher.getDefaultQueryCache()); + searcher.setQueryCachingPolicy(NEVER_CACHE_POLICY); + + QueryProfiler profiler = new QueryProfiler(); + searcher.setProfiler(profiler); + TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.search(query, collector); + + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdownConstantScoreQuery = results.get(0).getTimeBreakdown(); + assertEquals(1, results.get(0).getProfiledChildren().size()); + Map 
breakdownTermQuery = results.get(0).getProfiledChildren().get(0).getTimeBreakdown(); + //Timing from the scorer of term query are inherited by constant score query scorer. + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + + assertEquals(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), + breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue()); + + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testConstantScoreTotalHitsCachedQuery() throws IOException { + Query query = new ConstantScoreQuery(new TermQuery(new Term("foo", "bar"))); + + //clean cache and make sure queries will be cached + searcher.setQueryCache(IndexSearcher.getDefaultQueryCache()); + searcher.setQueryCachingPolicy(ALWAYS_CACHE_POLICY); + //Put query on cache + TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.search(query, collector); + + QueryProfiler profiler = new QueryProfiler(); + 
searcher.setProfiler(profiler); + collector = new TotalHitCountCollector(); + searcher.search(query, collector); + + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdownConstantScoreQuery = results.get(0).getTimeBreakdown(); + assertEquals(1, results.get(0).getProfiledChildren().size()); + Map breakdownTermQuery = results.get(0).getProfiledChildren().get(0).getTimeBreakdown(); + + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownConstantScoreQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString()).longValue(), equalTo(0L)); + + assertThat(breakdownTermQuery.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count").longValue(), greaterThan(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.BUILD_SCORER.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.NEXT_DOC.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.ADVANCE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.SCORE.toString() + "_count").longValue(), equalTo(0L)); + assertThat(breakdownTermQuery.get(QueryTimingType.MATCH.toString() + "_count").longValue(), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testNoScoring() throws IOException { QueryProfiler profiler = new QueryProfiler(); searcher.setProfiler(profiler); @@ -276,4 +481,29 @@ public void testScorerSupplier() throws IOException { reader.close(); dir.close(); } + + private static final QueryCachingPolicy ALWAYS_CACHE_POLICY = new QueryCachingPolicy() { + + @Override + public 
void onUse(Query query) {} + + @Override + public boolean shouldCache(Query query) throws IOException { + return true; + } + + }; + + private static final QueryCachingPolicy NEVER_CACHE_POLICY = new QueryCachingPolicy() { + + @Override + public void onUse(Query query) {} + + @Override + public boolean shouldCache(Query query) throws IOException { + return false; + } + + }; + } From c4261bab44742724b2e04c43a4abef3b7cba08c0 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 19 Sep 2018 09:19:13 +0200 Subject: [PATCH 36/46] Add minimal sanity checks to custom/scripted similarities. (#33564) Add minimal sanity checks to custom/scripted similarities. Lucene 8 introduced more constraints on similarities, in particular: - scores must not be negative, - scores must not decrease when term freq increases, - scores must not increase when norm (interpreted as an unsigned long) increases. We can't check every single case, but could at least run some sanity checks. Relates #33309 --- .../NonNegativeScoresSimilarity.java | 96 ++++++++++++++++++ .../index/similarity/SimilarityService.java | 99 ++++++++++++++++++- .../elasticsearch/index/IndexModuleTests.java | 11 ++- .../NonNegativeScoresSimilarityTests.java | 57 +++++++++++ .../similarity/SimilarityServiceTests.java | 78 +++++++++++++++ .../indices/IndicesServiceTests.java | 8 +- 6 files changed, 339 insertions(+), 10 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarity.java create mode 100644 server/src/test/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarityTests.java diff --git a/server/src/main/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarity.java b/server/src/main/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarity.java new file mode 100644 index 0000000000000..319ac0ff4b283 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarity.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.similarities.Similarity; + +/** + * A {@link Similarity} that rejects negative scores. This class exists so that users get + * an error instead of silently corrupt top hits. It should be applied to any custom or + * scripted similarity. 
+ */ +// public for testing +public final class NonNegativeScoresSimilarity extends Similarity { + + // Escape hatch + private static final String ES_ENFORCE_POSITIVE_SCORES = "es.enforce.positive.scores"; + private static final boolean ENFORCE_POSITIVE_SCORES; + static { + String enforcePositiveScores = System.getProperty(ES_ENFORCE_POSITIVE_SCORES); + if (enforcePositiveScores == null) { + ENFORCE_POSITIVE_SCORES = true; + } else if ("false".equals(enforcePositiveScores)) { + ENFORCE_POSITIVE_SCORES = false; + } else { + throw new IllegalArgumentException(ES_ENFORCE_POSITIVE_SCORES + " may only be unset or set to [false], but got [" + + enforcePositiveScores + "]"); + } + } + + private final Similarity in; + + public NonNegativeScoresSimilarity(Similarity in) { + this.in = in; + } + + public Similarity getDelegate() { + return in; + } + + @Override + public long computeNorm(FieldInvertState state) { + return in.computeNorm(state); + } + + @Override + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + final SimScorer inScorer = in.scorer(boost, collectionStats, termStats); + return new SimScorer() { + + @Override + public float score(float freq, long norm) { + float score = inScorer.score(freq, norm); + if (score < 0f) { + if (ENFORCE_POSITIVE_SCORES) { + throw new IllegalArgumentException("Similarities must not produce negative scores, but got:\n" + + inScorer.explain(Explanation.match(freq, "term frequency"), norm)); + } else { + return 0f; + } + } + return score; + } + + @Override + public Explanation explain(Explanation freq, long norm) { + Explanation expl = inScorer.explain(freq, norm); + if (expl.isMatch() && expl.getValue().floatValue() < 0) { + expl = Explanation.match(0f, "max of:", + expl, Explanation.match(0f, "Minimum allowed score")); + } + return expl; + } + }; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index eaed2169f11c0..06a476e64ec7a 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -19,15 +19,22 @@ package org.elasticsearch.index.similarity; +import org.apache.logging.log4j.LogManager; +import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.search.similarities.Similarity.SimScorer; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexModule; @@ -44,7 +51,7 @@ public final class SimilarityService extends AbstractIndexComponent { - private static final DeprecationLogger DEPRECATION_LOGGER = new 
DeprecationLogger(Loggers.getLogger(SimilarityService.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(SimilarityService.class)); public static final String DEFAULT_SIMILARITY = "BM25"; private static final String CLASSIC_SIMILARITY = "classic"; private static final Map>> DEFAULTS; @@ -131,8 +138,14 @@ public SimilarityService(IndexSettings indexSettings, ScriptService scriptServic } TriFunction defaultFactory = BUILT_IN.get(typeName); TriFunction factory = similarities.getOrDefault(typeName, defaultFactory); - final Similarity similarity = factory.apply(providerSettings, indexSettings.getIndexVersionCreated(), scriptService); - providers.put(name, () -> similarity); + Similarity similarity = factory.apply(providerSettings, indexSettings.getIndexVersionCreated(), scriptService); + validateSimilarity(indexSettings.getIndexVersionCreated(), similarity); + if (BUILT_IN.containsKey(typeName) == false || "scripted".equals(typeName)) { + // We don't trust custom similarities + similarity = new NonNegativeScoresSimilarity(similarity); + } + final Similarity similarityF = similarity; // like similarity but final + providers.put(name, () -> similarityF); } for (Map.Entry>> entry : DEFAULTS.entrySet()) { providers.put(entry.getKey(), entry.getValue().apply(indexSettings.getIndexVersionCreated())); @@ -151,7 +164,7 @@ public Similarity similarity(MapperService mapperService) { defaultSimilarity; } - + public SimilarityProvider getSimilarity(String name) { Supplier sim = similarities.get(name); if (sim == null) { @@ -182,4 +195,80 @@ public Similarity get(String name) { return (fieldType != null && fieldType.similarity() != null) ? fieldType.similarity().get() : defaultSimilarity; } } + + static void validateSimilarity(Version indexCreatedVersion, Similarity similarity) { + validateScoresArePositive(indexCreatedVersion, similarity); + validateScoresDoNotDecreaseWithFreq(indexCreatedVersion, similarity); + validateScoresDoNotIncreaseWithNorm(indexCreatedVersion, similarity); + } + + private static void validateScoresArePositive(Version indexCreatedVersion, Similarity similarity) { + CollectionStatistics collectionStats = new CollectionStatistics("some_field", 1200, 1100, 3000, 2000); + TermStatistics termStats = new TermStatistics(new BytesRef("some_value"), 100, 130); + SimScorer scorer = similarity.scorer(2f, collectionStats, termStats); + FieldInvertState state = new FieldInvertState(indexCreatedVersion.major, "some_field", + IndexOptions.DOCS_AND_FREQS, 20, 20, 0, 50, 10, 3); // length = 20, no overlap + final long norm = similarity.computeNorm(state); + for (int freq = 1; freq <= 10; ++freq) { + float score = scorer.score(freq, norm); + if (score < 0) { + fail(indexCreatedVersion, "Similarities should not return negative scores:\n" + + scorer.explain(Explanation.match(freq, "term freq"), norm)); + } + } + } + + private static void validateScoresDoNotDecreaseWithFreq(Version indexCreatedVersion, Similarity similarity) { + CollectionStatistics collectionStats = new CollectionStatistics("some_field", 1200, 1100, 3000, 2000); + TermStatistics termStats = new TermStatistics(new BytesRef("some_value"), 100, 130); + SimScorer scorer = similarity.scorer(2f, collectionStats, termStats); + FieldInvertState state = new FieldInvertState(indexCreatedVersion.major, "some_field", + IndexOptions.DOCS_AND_FREQS, 20, 20, 0, 50, 10, 3); // length = 20, no overlap + final long norm = similarity.computeNorm(state); + float previousScore = 0; + for (int 
freq = 1; freq <= 10; ++freq) { + float score = scorer.score(freq, norm); + if (score < previousScore) { + fail(indexCreatedVersion, "Similarity scores should not decrease when term frequency increases:\n" + + scorer.explain(Explanation.match(freq - 1, "term freq"), norm) + "\n" + + scorer.explain(Explanation.match(freq, "term freq"), norm)); + } + previousScore = score; + } + } + + private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVersion, Similarity similarity) { + CollectionStatistics collectionStats = new CollectionStatistics("some_field", 1200, 1100, 3000, 2000); + TermStatistics termStats = new TermStatistics(new BytesRef("some_value"), 100, 130); + SimScorer scorer = similarity.scorer(2f, collectionStats, termStats); + + long previousNorm = 0; + float previousScore = Float.MAX_VALUE; + for (int length = 1; length <= 10; ++length) { + FieldInvertState state = new FieldInvertState(indexCreatedVersion.major, "some_field", + IndexOptions.DOCS_AND_FREQS, length, length, 0, 50, 10, 3); // length = 20, no overlap + final long norm = similarity.computeNorm(state); + if (Long.compareUnsigned(previousNorm, norm) > 0) { + // esoteric similarity, skip this check + break; + } + float score = scorer.score(1, norm); + if (score > previousScore) { + fail(indexCreatedVersion, "Similarity scores should not increase when norm increases:\n" + + scorer.explain(Explanation.match(1, "term freq"), norm - 1) + "\n" + + scorer.explain(Explanation.match(1, "term freq"), norm)); + } + previousScore = score; + previousNorm = norm; + } + } + + private static void fail(Version indexCreatedVersion, String message) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException(message); + } else if (indexCreatedVersion.onOrAfter(Version.V_6_5_0)) { + DEPRECATION_LOGGER.deprecated(message); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 078ec5ec20abc..a1166029146e6 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -59,6 +59,7 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.NonNegativeScoresSimilarity; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesModule; @@ -77,6 +78,7 @@ import org.elasticsearch.test.engine.MockEngineFactory; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collections; @@ -295,10 +297,13 @@ public void testAddSimilarity() throws IOException { IndexService indexService = newIndexService(module); SimilarityService similarityService = indexService.similarityService(); - assertNotNull(similarityService.getSimilarity("my_similarity")); - assertTrue(similarityService.getSimilarity("my_similarity").get() instanceof TestSimilarity); + Similarity similarity = similarityService.getSimilarity("my_similarity").get(); + assertNotNull(similarity); + assertThat(similarity, Matchers.instanceOf(NonNegativeScoresSimilarity.class)); + similarity = ((NonNegativeScoresSimilarity) similarity).getDelegate(); + assertThat(similarity, 
Matchers.instanceOf(TestSimilarity.class)); assertEquals("my_similarity", similarityService.getSimilarity("my_similarity").name()); - assertEquals("there is a key", ((TestSimilarity) similarityService.getSimilarity("my_similarity").get()).key); + assertEquals("there is a key", ((TestSimilarity) similarity).key); indexService.close("simon says", false); } diff --git a/server/src/test/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarityTests.java new file mode 100644 index 0000000000000..33528c2190051 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/similarity/NonNegativeScoresSimilarityTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.search.similarities.Similarity.SimScorer; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +public class NonNegativeScoresSimilarityTests extends ESTestCase { + + public void testBasics() { + Similarity negativeScoresSim = new Similarity() { + + @Override + public long computeNorm(FieldInvertState state) { + return state.getLength(); + } + + @Override + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... 
termStats) { + return new SimScorer() { + @Override + public float score(float freq, long norm) { + return freq - 5; + } + }; + } + }; + Similarity assertingSimilarity = new NonNegativeScoresSimilarity(negativeScoresSim); + SimScorer scorer = assertingSimilarity.scorer(1f, null); + assertEquals(2f, scorer.score(7f, 1L), 0f); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> scorer.score(2f, 1L)); + assertThat(e.getMessage(), Matchers.containsString("Similarities must not produce negative scores")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index 5d18a595e9687..48d1e2b9c9b6c 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -18,12 +18,18 @@ */ package org.elasticsearch.index.similarity; +import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.hamcrest.Matchers; import java.util.Collections; @@ -56,4 +62,76 @@ public void testOverrideDefaultSimilarity() { SimilarityService service = new SimilarityService(indexSettings, null, Collections.emptyMap()); assertTrue(service.getDefaultSimilarity() instanceof BooleanSimilarity); } + + public void testSimilarityValidation() { + Similarity negativeScoresSim = new Similarity() { + + @Override + public long computeNorm(FieldInvertState state) { + return state.getLength(); + } + + @Override + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + return new SimScorer() { + + @Override + public float score(float freq, long norm) { + return -1; + } + + }; + } + }; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> SimilarityService.validateSimilarity(Version.V_7_0_0_alpha1, negativeScoresSim)); + assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); + + Similarity decreasingScoresWithFreqSim = new Similarity() { + + @Override + public long computeNorm(FieldInvertState state) { + return state.getLength(); + } + + @Override + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + return new SimScorer() { + + @Override + public float score(float freq, long norm) { + return 1 / (freq + norm); + } + + }; + } + }; + e = expectThrows(IllegalArgumentException.class, + () -> SimilarityService.validateSimilarity(Version.V_7_0_0_alpha1, decreasingScoresWithFreqSim)); + assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); + + Similarity increasingScoresWithNormSim = new Similarity() { + + @Override + public long computeNorm(FieldInvertState state) { + return state.getLength(); + } + + @Override + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... 
termStats) { + return new SimScorer() { + + @Override + public float score(float freq, long norm) { + return freq + norm; + } + + }; + } + }; + e = expectThrows(IllegalArgumentException.class, + () -> SimilarityService.validateSimilarity(Version.V_7_0_0_alpha1, increasingScoresWithNormSim)); + assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); + } + } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 35416c617fdd0..b4e98775d97ac 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices; import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -56,6 +57,7 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.similarity.NonNegativeScoresSimilarity; import org.elasticsearch.indices.IndicesService.ShardDeletionCheckResult; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -448,8 +450,10 @@ public void testStandAloneMapperServiceWithPlugins() throws IOException { .build(); MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); assertNotNull(mapperService.documentMapperParser().parserContext("type").typeParser("fake-mapper")); - assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test").get(), - instanceOf(BM25Similarity.class)); + Similarity sim = mapperService.documentMapperParser().parserContext("type").getSimilarity("test").get(); + assertThat(sim, instanceOf(NonNegativeScoresSimilarity.class)); + sim = ((NonNegativeScoresSimilarity) sim).getDelegate(); + assertThat(sim, instanceOf(BM25Similarity.class)); } public void testStatsByShardDoesNotDieFromExpectedExceptions() { From 61e1df0274cb74d3c90a7ca4504d233114ae4e85 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 19 Sep 2018 09:28:38 +0200 Subject: [PATCH 37/46] Use the global doc id to generate a random score (#33599) This commit changes the random_score function to use the global docID of the document rather than the segment docID to generate random scores. As a result documents that have the same segment docID within the shard will generate different scores. 
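
To illustrate the problem (a sketch, not part of the change; values and the
import path are assumptions): segment-local doc IDs restart at zero in every
segment, so hashing the bare docId gave the first document of every segment
the same random score. Offsetting by the segment's docBase hashes a
shard-global ID instead:

```
import com.carrotsearch.hppc.BitMixer; // the same hash mixer this class already uses

public class GlobalDocIdSketch {
    public static void main(String[] args) {
        int saltedSeed = 42;              // illustrative seed
        int docId = 0;                    // first document of each segment
        int docBase0 = 0, docBase1 = 128; // per-segment offsets from LeafReaderContext
        // Before: both segments hash the same input and get the same score.
        System.out.println(BitMixer.mix(docId, saltedSeed));
        System.out.println(BitMixer.mix(docId, saltedSeed));
        // After: the shard-global id differs per segment, so the hashes differ.
        System.out.println(BitMixer.mix(docBase0 + docId, saltedSeed));
        System.out.println(BitMixer.mix(docBase1 + docId, saltedSeed));
    }
}
```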
---
 .../common/lucene/search/function/RandomScoreFunction.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java
index 8694b6fa019f1..b55730f514ac9 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java
@@ -70,7 +70,7 @@ public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) {
             public double score(int docId, float subQueryScore) throws IOException {
                 int hash;
                 if (values == null) {
-                    hash = BitMixer.mix(docId, saltedSeed);
+                    hash = BitMixer.mix(ctx.docBase + docId, saltedSeed);
                 } else if (values.advanceExact(docId)) {
                     hash = StringHelper.murmurhash3_x86_32(values.nextValue(), saltedSeed);
                 } else {

From c9765d5fb91ef549d51eaa71daf7ed12e461df77 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Wed, 19 Sep 2018 08:36:03 +0100
Subject: [PATCH 38/46] Emphasize that filesystem-level backups don't work
 (#33102)

It is not obvious that a filesystem-level backup may capture an
inconsistent set of files that may fail on restore, or (worse) succeed
having silently discarded some data. This change spells this out, and
reorganises the first page or so of the snapshot/restore docs to make
this warning fit more nicely.
---
 docs/reference/modules/snapshots.asciidoc | 49 +++++++++++++++++------
 1 file changed, 37 insertions(+), 12 deletions(-)

diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
index ba6adf1d35fb8..23051f622f755 100644
--- a/docs/reference/modules/snapshots.asciidoc
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -1,22 +1,47 @@
 [[modules-snapshots]]
 == Snapshot And Restore
 
-You can store snapshots of individual indices or an entire cluster in
-a remote repository like a shared file system, S3, or HDFS. These snapshots
-are great for backups because they can be restored relatively quickly. However,
-snapshots can only be restored to versions of Elasticsearch that can read the
-indices:
+A snapshot is a backup taken from a running Elasticsearch cluster. You can take
+a snapshot of individual indices or of the entire cluster and store it in a
+repository on a shared filesystem, and there are plugins that support remote
+repositories on S3, HDFS, Azure, Google Cloud Storage and more.
+
+Snapshots are taken incrementally. This means that when creating a snapshot of
+an index Elasticsearch will avoid copying any data that is already stored in
+the repository as part of an earlier snapshot of the same index. Therefore it
+can be efficient to take snapshots of your cluster quite frequently.
+
+Snapshots can be restored into a running cluster via the restore API. When
+restoring an index it is possible to alter the name of the restored index as
+well as some of its settings, allowing a great deal of flexibility in how the
+snapshot and restore functionality can be used.
+
+WARNING: It is not possible to back up an Elasticsearch cluster simply by
+taking a copy of the data directories of all of its nodes. Elasticsearch may be
+making changes to the contents of its data directories while it is running, and
+this means that copying its data directories cannot be expected to capture a
+consistent picture of their contents. Attempts to restore a cluster from such a
+backup may fail, reporting corruption and/or missing files, or may appear to
+have succeeded having silently lost some of its data. The only reliable way to
+back up a cluster is by using the snapshot and restore functionality.
+
+[float]
+=== Version compatibility
+
+A snapshot contains a copy of the on-disk data structures that make up an
+index. This means that snapshots can only be restored to versions of
+Elasticsearch that can read the indices:
 
 * A snapshot of an index created in 5.x can be restored to 6.x.
 * A snapshot of an index created in 2.x can be restored to 5.x.
 * A snapshot of an index created in 1.x can be restored to 2.x.
 
-Conversely, snapshots of indices created in 1.x **cannot** be restored to
-5.x or 6.x, and snapshots of indices created in 2.x **cannot** be restored
-to 6.x.
+Conversely, snapshots of indices created in 1.x **cannot** be restored to 5.x
+or 6.x, and snapshots of indices created in 2.x **cannot** be restored to 6.x.
 
-Snapshots are incremental and can contain indices created in various
-versions of Elasticsearch. If any indices in a snapshot were created in an
+Each snapshot can contain indices created in various versions of Elasticsearch,
+and when restoring a snapshot it must be possible to restore all of the indices
+into the target cluster. If any indices in a snapshot were created in an
 incompatible version, you will not be able restore the snapshot.
 
 IMPORTANT: When backing up your data prior to an upgrade, keep in mind that you
@@ -28,8 +53,8 @@ that is incompatible with the version of the cluster you are currently running,
 you can restore it on the latest compatible version and use
 <> to rebuild the index on the current version. Reindexing from
 remote is only possible if the original index has
-source enabled. Retrieving and reindexing the data can take significantly longer
-than simply restoring a snapshot. If you have a large amount of data, we
+source enabled. Retrieving and reindexing the data can take significantly
+longer than simply restoring a snapshot. If you have a large amount of data, we
 recommend testing the reindex from remote process with a subset of your data
 to understand the time requirements before proceeding.

From d22b383b9cb5f17e7f2dbdd6524b3b80f56f2cdc Mon Sep 17 00:00:00 2001
From: Marios Trivyzas
Date: Wed, 19 Sep 2018 10:16:24 +0200
Subject: [PATCH 39/46] SQL: Fix issue with options for QUERY() and MATCH().
 (#33828)

Previously, multiple comma-separated lists of options were not recognized
correctly, which resulted in only the last of them being taken into
account. For example, for the following query:

SELECT * FROM test WHERE QUERY('search', 'default_field=foo', 'default_operator=and')

only the `default_operator=and` was finally passed to the ES query.
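
As a rough sketch of the fix (illustrative only; the class and variable
names here are made up): the parser now collects every option argument and
joins them with `;`, the delimiter that FullTextUtils.parseSettings()
splits on, so no option is dropped:

```
import java.util.StringJoiner;

// Mirrors the new ExpressionBuilder#getQueryOptions in the diff below.
public class OptionsJoinSketch {
    public static void main(String[] args) {
        StringJoiner sj = new StringJoiner(";");
        sj.add("default_field=foo");
        sj.add("default_operator=and");
        // ";" is the delimiter FullTextUtils.parseSettings() splits on,
        // so both options survive: "default_field=foo;default_operator=and"
        System.out.println(sj.toString());
    }
}
```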
Fixes: #32602 --- x-pack/plugin/sql/src/main/antlr/SqlBase.g4 | 10 +- .../predicate/fulltext/FullTextUtils.java | 12 +- .../xpack/sql/parser/ExpressionBuilder.java | 19 +- .../xpack/sql/parser/SqlBaseBaseListener.java | 12 + .../xpack/sql/parser/SqlBaseBaseVisitor.java | 7 + .../xpack/sql/parser/SqlBaseListener.java | 10 + .../xpack/sql/parser/SqlBaseParser.java | 1721 +++++++++-------- .../xpack/sql/parser/SqlBaseVisitor.java | 6 + .../fulltext/FullTextUtilsTests.java | 41 + .../xpack/sql/parser/SqlParserTests.java | 46 +- .../sql/src/main/resources/fulltext.csv-spec | 21 + 11 files changed, 1037 insertions(+), 868 deletions(-) create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtilsTests.java diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 index 396cc70920aeb..ca6fdece28153 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -163,14 +163,18 @@ expression booleanExpression : NOT booleanExpression #logicalNot | EXISTS '(' query ')' #exists - | QUERY '(' queryString=string (',' options=string)* ')' #stringQuery - | MATCH '(' singleField=qualifiedName ',' queryString=string (',' options=string)* ')' #matchQuery - | MATCH '(' multiFields=string ',' queryString=string (',' options=string)* ')' #multiMatchQuery + | QUERY '(' queryString=string matchQueryOptions ')' #stringQuery + | MATCH '(' singleField=qualifiedName ',' queryString=string matchQueryOptions ')' #matchQuery + | MATCH '(' multiFields=string ',' queryString=string matchQueryOptions ')' #multiMatchQuery | predicated #booleanDefault | left=booleanExpression operator=AND right=booleanExpression #logicalBinary | left=booleanExpression operator=OR right=booleanExpression #logicalBinary ; +matchQueryOptions + : (',' string)* + ; + // workaround for: // https://github.com/antlr/antlr4/issues/780 // https://github.com/antlr/antlr4/issues/781 diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java index f22f46cad2b03..bb57c7a154930 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java @@ -5,16 +5,16 @@ */ package org.elasticsearch.xpack.sql.expression.predicate.fulltext; -import java.util.LinkedHashMap; -import java.util.Locale; -import java.util.Map; -import java.util.Set; - import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate.Operator; import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.tree.Location; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + import static java.util.Collections.emptyMap; abstract class FullTextUtils { @@ -26,7 +26,7 @@ static Map parseSettings(String options, Location location) { return emptyMap(); } String[] list = Strings.delimitedListToStringArray(options, DELIMITER); - Map op = new LinkedHashMap(list.length); + Map op = new LinkedHashMap<>(list.length); for (String entry : list) { String[] split = splitInTwo(entry, "="); diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index 2719d39bbecb2..539713f3285b5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -67,6 +67,7 @@ import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalBinaryContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalNotContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MatchQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MatchQueryOptionsContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MultiMatchQueryContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NullLiteralContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.OrderByContext; @@ -99,6 +100,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.StringJoiner; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.sql.type.DataTypeConversion.conversionFor; @@ -324,18 +326,27 @@ public Object visitArithmeticBinary(ArithmeticBinaryContext ctx) { // @Override public Object visitStringQuery(StringQueryContext ctx) { - return new StringQueryPredicate(source(ctx), string(ctx.queryString), string(ctx.options)); + return new StringQueryPredicate(source(ctx), string(ctx.queryString), getQueryOptions(ctx.matchQueryOptions())); } @Override public Object visitMatchQuery(MatchQueryContext ctx) { return new MatchQueryPredicate(source(ctx), new UnresolvedAttribute(source(ctx.singleField), - visitQualifiedName(ctx.singleField)), string(ctx.queryString), string(ctx.options)); + visitQualifiedName(ctx.singleField)), string(ctx.queryString), getQueryOptions(ctx.matchQueryOptions())); } @Override public Object visitMultiMatchQuery(MultiMatchQueryContext ctx) { - return new MultiMatchQueryPredicate(source(ctx), string(ctx.multiFields), string(ctx.queryString), string(ctx.options)); + return new MultiMatchQueryPredicate(source(ctx), string(ctx.multiFields), string(ctx.queryString), + getQueryOptions(ctx.matchQueryOptions())); + } + + private String getQueryOptions(MatchQueryOptionsContext optionsCtx) { + StringJoiner sj = new StringJoiner(";"); + for (StringContext sc: optionsCtx.string()) { + sj.add(string(sc)); + } + return sj.toString(); } @Override @@ -676,4 +687,4 @@ public Literal visitGuidEscapedLiteral(GuidEscapedLiteralContext ctx) { return new Literal(source(ctx), string, DataType.KEYWORD); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java index 8f261c0d3d001..d3c025d240c16 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java @@ -527,6 +527,18 @@ class SqlBaseBaseListener implements SqlBaseListener { *
<p>The default implementation does nothing.</p>
*/ @Override public void exitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void enterMatchQueryOptions(SqlBaseParser.MatchQueryOptionsContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
+ */ + @Override public void exitMatchQueryOptions(SqlBaseParser.MatchQueryOptionsContext ctx) { } /** * {@inheritDoc} * diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java index 837e5057c36d5..8e7603947e799 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java @@ -312,6 +312,13 @@ class SqlBaseBaseVisitor extends AbstractParseTreeVisitor implements SqlBa * {@link #visitChildren} on {@code ctx}.

*/ @Override public T visitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitMatchQueryOptions(SqlBaseParser.MatchQueryOptionsContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java index 82c2ac90e7782..1c7364a970d8c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java @@ -489,6 +489,16 @@ interface SqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#matchQueryOptions}. + * @param ctx the parse tree + */ + void enterMatchQueryOptions(SqlBaseParser.MatchQueryOptionsContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#matchQueryOptions}. + * @param ctx the parse tree + */ + void exitMatchQueryOptions(SqlBaseParser.MatchQueryOptionsContext ctx); /** * Enter a parse tree produced by {@link SqlBaseParser#predicated}. * @param ctx the parse tree diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java index 164eacd402bf7..246de0cf1a9fe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -40,27 +40,27 @@ class SqlBaseParser extends Parser { RULE_groupingElement = 11, RULE_groupingExpressions = 12, RULE_namedQuery = 13, RULE_setQuantifier = 14, RULE_selectItem = 15, RULE_relation = 16, RULE_joinRelation = 17, RULE_joinType = 18, RULE_joinCriteria = 19, RULE_relationPrimary = 20, - RULE_expression = 21, RULE_booleanExpression = 22, RULE_predicated = 23, - RULE_predicate = 24, RULE_likePattern = 25, RULE_pattern = 26, RULE_patternEscape = 27, - RULE_valueExpression = 28, RULE_primaryExpression = 29, RULE_castExpression = 30, - RULE_castTemplate = 31, RULE_extractExpression = 32, RULE_extractTemplate = 33, - RULE_functionExpression = 34, RULE_functionTemplate = 35, RULE_functionName = 36, - RULE_constant = 37, RULE_comparisonOperator = 38, RULE_booleanValue = 39, - RULE_dataType = 40, RULE_qualifiedName = 41, RULE_identifier = 42, RULE_tableIdentifier = 43, - RULE_quoteIdentifier = 44, RULE_unquoteIdentifier = 45, RULE_number = 46, - RULE_string = 47, RULE_nonReserved = 48; + RULE_expression = 21, RULE_booleanExpression = 22, RULE_matchQueryOptions = 23, + RULE_predicated = 24, RULE_predicate = 25, RULE_likePattern = 26, RULE_pattern = 27, + RULE_patternEscape = 28, RULE_valueExpression = 29, RULE_primaryExpression = 30, + RULE_castExpression = 31, RULE_castTemplate = 32, RULE_extractExpression = 33, + RULE_extractTemplate = 34, RULE_functionExpression = 35, RULE_functionTemplate = 36, + RULE_functionName = 37, RULE_constant = 38, RULE_comparisonOperator = 39, + RULE_booleanValue = 40, RULE_dataType = 41, RULE_qualifiedName = 42, RULE_identifier = 43, + RULE_tableIdentifier = 44, RULE_quoteIdentifier = 45, RULE_unquoteIdentifier = 46, + RULE_number = 47, RULE_string = 48, RULE_nonReserved = 49; public static final String[] ruleNames = { "singleStatement", "singleExpression", "statement", "query", "queryNoWith", "limitClause", "queryTerm", "orderBy", "querySpecification", 
"fromClause", "groupBy", "groupingElement", "groupingExpressions", "namedQuery", "setQuantifier", "selectItem", "relation", "joinRelation", "joinType", "joinCriteria", - "relationPrimary", "expression", "booleanExpression", "predicated", "predicate", - "likePattern", "pattern", "patternEscape", "valueExpression", "primaryExpression", - "castExpression", "castTemplate", "extractExpression", "extractTemplate", - "functionExpression", "functionTemplate", "functionName", "constant", - "comparisonOperator", "booleanValue", "dataType", "qualifiedName", "identifier", - "tableIdentifier", "quoteIdentifier", "unquoteIdentifier", "number", "string", - "nonReserved" + "relationPrimary", "expression", "booleanExpression", "matchQueryOptions", + "predicated", "predicate", "likePattern", "pattern", "patternEscape", + "valueExpression", "primaryExpression", "castExpression", "castTemplate", + "extractExpression", "extractTemplate", "functionExpression", "functionTemplate", + "functionName", "constant", "comparisonOperator", "booleanValue", "dataType", + "qualifiedName", "identifier", "tableIdentifier", "quoteIdentifier", "unquoteIdentifier", + "number", "string", "nonReserved" }; private static final String[] _LITERAL_NAMES = { @@ -174,9 +174,9 @@ public final SingleStatementContext singleStatement() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(98); + setState(100); statement(); - setState(99); + setState(101); match(EOF); } } @@ -221,9 +221,9 @@ public final SingleExpressionContext singleExpression() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(101); + setState(103); expression(); - setState(102); + setState(104); match(EOF); } } @@ -617,14 +617,14 @@ public final StatementContext statement() throws RecognitionException { enterRule(_localctx, 4, RULE_statement); int _la; try { - setState(204); + setState(206); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,17,_ctx) ) { case 1: _localctx = new StatementDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(104); + setState(106); query(); } break; @@ -632,27 +632,27 @@ public final StatementContext statement() throws RecognitionException { _localctx = new ExplainContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(105); + setState(107); match(EXPLAIN); - setState(119); + setState(121); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { case 1: { - setState(106); + setState(108); match(T__0); - setState(115); + setState(117); _errHandler.sync(this); _la = _input.LA(1); while (((((_la - 28)) & ~0x3f) == 0 && ((1L << (_la - 28)) & ((1L << (FORMAT - 28)) | (1L << (PLAN - 28)) | (1L << (VERIFY - 28)))) != 0)) { { - setState(113); + setState(115); switch (_input.LA(1)) { case PLAN: { - setState(107); + setState(109); match(PLAN); - setState(108); + setState(110); ((ExplainContext)_localctx).type = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ALL) | (1L << ANALYZED) | (1L << EXECUTABLE) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED))) != 0)) ) { @@ -664,9 +664,9 @@ public final StatementContext statement() throws RecognitionException { break; case FORMAT: { - setState(109); + setState(111); match(FORMAT); - setState(110); + setState(112); ((ExplainContext)_localctx).format = _input.LT(1); _la = _input.LA(1); if ( !(_la==GRAPHVIZ || _la==TEXT) ) { @@ -678,9 +678,9 @@ public final StatementContext statement() throws RecognitionException { break; case VERIFY: { - 
setState(111); + setState(113); match(VERIFY); - setState(112); + setState(114); ((ExplainContext)_localctx).verify = booleanValue(); } break; @@ -688,16 +688,16 @@ public final StatementContext statement() throws RecognitionException { throw new NoViableAltException(this); } } - setState(117); + setState(119); _errHandler.sync(this); _la = _input.LA(1); } - setState(118); + setState(120); match(T__1); } break; } - setState(121); + setState(123); statement(); } break; @@ -705,27 +705,27 @@ public final StatementContext statement() throws RecognitionException { _localctx = new DebugContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(122); + setState(124); match(DEBUG); - setState(134); + setState(136); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(123); + setState(125); match(T__0); - setState(130); + setState(132); _errHandler.sync(this); _la = _input.LA(1); while (_la==FORMAT || _la==PLAN) { { - setState(128); + setState(130); switch (_input.LA(1)) { case PLAN: { - setState(124); + setState(126); match(PLAN); - setState(125); + setState(127); ((DebugContext)_localctx).type = _input.LT(1); _la = _input.LA(1); if ( !(_la==ANALYZED || _la==OPTIMIZED) ) { @@ -737,9 +737,9 @@ public final StatementContext statement() throws RecognitionException { break; case FORMAT: { - setState(126); + setState(128); match(FORMAT); - setState(127); + setState(129); ((DebugContext)_localctx).format = _input.LT(1); _la = _input.LA(1); if ( !(_la==GRAPHVIZ || _la==TEXT) ) { @@ -753,16 +753,16 @@ public final StatementContext statement() throws RecognitionException { throw new NoViableAltException(this); } } - setState(132); + setState(134); _errHandler.sync(this); _la = _input.LA(1); } - setState(133); + setState(135); match(T__1); } break; } - setState(136); + setState(138); statement(); } break; @@ -770,15 +770,15 @@ public final StatementContext statement() throws RecognitionException { _localctx = new ShowTablesContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(137); + setState(139); match(SHOW); - setState(138); + setState(140); match(TABLES); - setState(141); + setState(143); switch (_input.LA(1)) { case LIKE: { - setState(139); + setState(141); ((ShowTablesContext)_localctx).tableLike = likePattern(); } break; @@ -813,7 +813,7 @@ public final StatementContext statement() throws RecognitionException { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: { - setState(140); + setState(142); ((ShowTablesContext)_localctx).tableIdent = tableIdentifier(); } break; @@ -828,22 +828,22 @@ public final StatementContext statement() throws RecognitionException { _localctx = new ShowColumnsContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(143); + setState(145); match(SHOW); - setState(144); + setState(146); match(COLUMNS); - setState(145); + setState(147); _la = _input.LA(1); if ( !(_la==FROM || _la==IN) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(148); + setState(150); switch (_input.LA(1)) { case LIKE: { - setState(146); + setState(148); ((ShowColumnsContext)_localctx).tableLike = likePattern(); } break; @@ -878,7 +878,7 @@ public final StatementContext statement() throws RecognitionException { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: { - setState(147); + setState(149); ((ShowColumnsContext)_localctx).tableIdent = tableIdentifier(); } break; @@ -891,18 +891,18 @@ public final StatementContext statement() throws RecognitionException { _localctx = new ShowColumnsContext(_localctx); 
enterOuterAlt(_localctx, 6); { - setState(150); + setState(152); _la = _input.LA(1); if ( !(_la==DESC || _la==DESCRIBE) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(153); + setState(155); switch (_input.LA(1)) { case LIKE: { - setState(151); + setState(153); ((ShowColumnsContext)_localctx).tableLike = likePattern(); } break; @@ -937,7 +937,7 @@ public final StatementContext statement() throws RecognitionException { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: { - setState(152); + setState(154); ((ShowColumnsContext)_localctx).tableIdent = tableIdentifier(); } break; @@ -950,15 +950,15 @@ public final StatementContext statement() throws RecognitionException { _localctx = new ShowFunctionsContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(155); + setState(157); match(SHOW); - setState(156); - match(FUNCTIONS); setState(158); + match(FUNCTIONS); + setState(160); _la = _input.LA(1); if (_la==LIKE) { { - setState(157); + setState(159); likePattern(); } } @@ -969,9 +969,9 @@ public final StatementContext statement() throws RecognitionException { _localctx = new ShowSchemasContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(160); + setState(162); match(SHOW); - setState(161); + setState(163); match(SCHEMAS); } break; @@ -979,9 +979,9 @@ public final StatementContext statement() throws RecognitionException { _localctx = new SysCatalogsContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(162); + setState(164); match(SYS); - setState(163); + setState(165); match(CATALOGS); } break; @@ -989,58 +989,58 @@ public final StatementContext statement() throws RecognitionException { _localctx = new SysTablesContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(164); + setState(166); match(SYS); - setState(165); + setState(167); match(TABLES); - setState(168); + setState(170); _la = _input.LA(1); if (_la==CATALOG) { { - setState(166); + setState(168); match(CATALOG); - setState(167); + setState(169); ((SysTablesContext)_localctx).clusterLike = likePattern(); } } - setState(172); + setState(174); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: { - setState(170); + setState(172); ((SysTablesContext)_localctx).tableLike = likePattern(); } break; case 2: { - setState(171); + setState(173); ((SysTablesContext)_localctx).tableIdent = tableIdentifier(); } break; } - setState(183); + setState(185); _la = _input.LA(1); if (_la==TYPE) { { - setState(174); + setState(176); match(TYPE); - setState(175); + setState(177); string(); - setState(180); + setState(182); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(176); + setState(178); match(T__2); - setState(177); + setState(179); string(); } } - setState(182); + setState(184); _errHandler.sync(this); _la = _input.LA(1); } @@ -1053,28 +1053,28 @@ public final StatementContext statement() throws RecognitionException { _localctx = new SysColumnsContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(185); + setState(187); match(SYS); - setState(186); + setState(188); match(COLUMNS); - setState(189); + setState(191); _la = _input.LA(1); if (_la==CATALOG) { { - setState(187); + setState(189); match(CATALOG); - setState(188); + setState(190); ((SysColumnsContext)_localctx).cluster = string(); } } - setState(194); + setState(196); switch (_input.LA(1)) { case TABLE: { - setState(191); + setState(193); match(TABLE); - setState(192); + setState(194); ((SysColumnsContext)_localctx).tableLike = likePattern(); } break; @@ -1109,7 
+1109,7 @@ public final StatementContext statement() throws RecognitionException { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: { - setState(193); + setState(195); ((SysColumnsContext)_localctx).tableIdent = tableIdentifier(); } break; @@ -1119,11 +1119,11 @@ public final StatementContext statement() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(197); + setState(199); _la = _input.LA(1); if (_la==LIKE) { { - setState(196); + setState(198); ((SysColumnsContext)_localctx).columnPattern = likePattern(); } } @@ -1134,9 +1134,9 @@ public final StatementContext statement() throws RecognitionException { _localctx = new SysTypesContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(199); + setState(201); match(SYS); - setState(200); + setState(202); match(TYPES); } break; @@ -1144,11 +1144,11 @@ public final StatementContext statement() throws RecognitionException { _localctx = new SysTableTypesContext(_localctx); enterOuterAlt(_localctx, 13); { - setState(201); + setState(203); match(SYS); - setState(202); + setState(204); match(TABLE); - setState(203); + setState(205); match(TYPES); } break; @@ -1202,34 +1202,34 @@ public final QueryContext query() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(215); + setState(217); _la = _input.LA(1); if (_la==WITH) { { - setState(206); + setState(208); match(WITH); - setState(207); + setState(209); namedQuery(); - setState(212); + setState(214); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(208); + setState(210); match(T__2); - setState(209); + setState(211); namedQuery(); } } - setState(214); + setState(216); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(217); + setState(219); queryNoWith(); } } @@ -1285,42 +1285,42 @@ public final QueryNoWithContext queryNoWith() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(219); + setState(221); queryTerm(); - setState(230); + setState(232); _la = _input.LA(1); if (_la==ORDER) { { - setState(220); + setState(222); match(ORDER); - setState(221); + setState(223); match(BY); - setState(222); + setState(224); orderBy(); - setState(227); + setState(229); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(223); + setState(225); match(T__2); - setState(224); + setState(226); orderBy(); } } - setState(229); + setState(231); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(233); + setState(235); _la = _input.LA(1); if (_la==LIMIT || _la==LIMIT_ESC) { { - setState(232); + setState(234); limitClause(); } } @@ -1369,14 +1369,14 @@ public final LimitClauseContext limitClause() throws RecognitionException { enterRule(_localctx, 10, RULE_limitClause); int _la; try { - setState(240); + setState(242); switch (_input.LA(1)) { case LIMIT: enterOuterAlt(_localctx, 1); { - setState(235); + setState(237); match(LIMIT); - setState(236); + setState(238); ((LimitClauseContext)_localctx).limit = _input.LT(1); _la = _input.LA(1); if ( !(_la==ALL || _la==INTEGER_VALUE) ) { @@ -1389,9 +1389,9 @@ public final LimitClauseContext limitClause() throws RecognitionException { case LIMIT_ESC: enterOuterAlt(_localctx, 2); { - setState(237); + setState(239); match(LIMIT_ESC); - setState(238); + setState(240); ((LimitClauseContext)_localctx).limit = _input.LT(1); _la = _input.LA(1); if ( !(_la==ALL || _la==INTEGER_VALUE) ) { @@ -1399,7 +1399,7 @@ public final LimitClauseContext limitClause() throws RecognitionException { } else { consume(); } - 
setState(239); + setState(241); match(ESC_END); } break; @@ -1472,13 +1472,13 @@ public final QueryTermContext queryTerm() throws RecognitionException { QueryTermContext _localctx = new QueryTermContext(_ctx, getState()); enterRule(_localctx, 12, RULE_queryTerm); try { - setState(247); + setState(249); switch (_input.LA(1)) { case SELECT: _localctx = new QueryPrimaryDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(242); + setState(244); querySpecification(); } break; @@ -1486,11 +1486,11 @@ public final QueryTermContext queryTerm() throws RecognitionException { _localctx = new SubqueryContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(243); + setState(245); match(T__0); - setState(244); + setState(246); queryNoWith(); - setState(245); + setState(247); match(T__1); } break; @@ -1542,13 +1542,13 @@ public final OrderByContext orderBy() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(249); - expression(); setState(251); + expression(); + setState(253); _la = _input.LA(1); if (_la==ASC || _la==DESC) { { - setState(250); + setState(252); ((OrderByContext)_localctx).ordering = _input.LT(1); _la = _input.LA(1); if ( !(_la==ASC || _la==DESC) ) { @@ -1627,75 +1627,75 @@ public final QuerySpecificationContext querySpecification() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(253); - match(SELECT); setState(255); + match(SELECT); + setState(257); _la = _input.LA(1); if (_la==ALL || _la==DISTINCT) { { - setState(254); + setState(256); setQuantifier(); } } - setState(257); + setState(259); selectItem(); - setState(262); + setState(264); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(258); + setState(260); match(T__2); - setState(259); + setState(261); selectItem(); } } - setState(264); + setState(266); _errHandler.sync(this); _la = _input.LA(1); } - setState(266); + setState(268); _la = _input.LA(1); if (_la==FROM) { { - setState(265); + setState(267); fromClause(); } } - setState(270); + setState(272); _la = _input.LA(1); if (_la==WHERE) { { - setState(268); + setState(270); match(WHERE); - setState(269); + setState(271); ((QuerySpecificationContext)_localctx).where = booleanExpression(0); } } - setState(275); + setState(277); _la = _input.LA(1); if (_la==GROUP) { { - setState(272); + setState(274); match(GROUP); - setState(273); + setState(275); match(BY); - setState(274); + setState(276); groupBy(); } } - setState(279); + setState(281); _la = _input.LA(1); if (_la==HAVING) { { - setState(277); + setState(279); match(HAVING); - setState(278); + setState(280); ((QuerySpecificationContext)_localctx).having = booleanExpression(0); } } @@ -1747,23 +1747,23 @@ public final FromClauseContext fromClause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(281); + setState(283); match(FROM); - setState(282); + setState(284); relation(); - setState(287); + setState(289); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(283); + setState(285); match(T__2); - setState(284); + setState(286); relation(); } } - setState(289); + setState(291); _errHandler.sync(this); _la = _input.LA(1); } @@ -1816,30 +1816,30 @@ public final GroupByContext groupBy() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(291); + setState(293); _la = _input.LA(1); if (_la==ALL || _la==DISTINCT) { { - setState(290); + setState(292); setQuantifier(); } } - setState(293); + setState(295); groupingElement(); - setState(298); + setState(300); 
_errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(294); + setState(296); match(T__2); - setState(295); + setState(297); groupingElement(); } } - setState(300); + setState(302); _errHandler.sync(this); _la = _input.LA(1); } @@ -1894,7 +1894,7 @@ public final GroupingElementContext groupingElement() throws RecognitionExceptio _localctx = new SingleGroupingSetContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(301); + setState(303); groupingExpressions(); } } @@ -1940,47 +1940,47 @@ public final GroupingExpressionsContext groupingExpressions() throws Recognition enterRule(_localctx, 24, RULE_groupingExpressions); int _la; try { - setState(316); + setState(318); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(303); + setState(305); match(T__0); - setState(312); + setState(314); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LEFT) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RIGHT) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (FUNCTION_ESC - 64)) | (1L << (DATE_ESC - 64)) | (1L << (TIME_ESC - 64)) | (1L << (TIMESTAMP_ESC - 64)) | (1L << (GUID_ESC - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(304); + setState(306); expression(); - setState(309); + setState(311); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(305); + setState(307); match(T__2); - setState(306); + setState(308); expression(); } } - setState(311); + setState(313); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(314); + setState(316); match(T__1); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(315); + setState(317); expression(); } break; @@ -2031,15 +2031,15 @@ public final NamedQueryContext namedQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(318); + setState(320); ((NamedQueryContext)_localctx).name = identifier(); - setState(319); + setState(321); match(AS); - setState(320); + setState(322); match(T__0); - setState(321); + setState(323); queryNoWith(); - setState(322); + setState(324); match(T__1); } } @@ -2083,7 +2083,7 @@ public final SetQuantifierContext setQuantifier() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(324); + setState(326); _la = _input.LA(1); if ( !(_la==ALL || _la==DISTINCT) ) { _errHandler.recoverInline(this); @@ -2146,22 +2146,22 @@ public final SelectItemContext selectItem() throws RecognitionException { _localctx = new SelectExpressionContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(326); + setState(328); expression(); - setState(331); 
+ setState(333); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(328); + setState(330); _la = _input.LA(1); if (_la==AS) { { - setState(327); + setState(329); match(AS); } } - setState(330); + setState(332); identifier(); } } @@ -2215,19 +2215,19 @@ public final RelationContext relation() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(333); + setState(335); relationPrimary(); - setState(337); + setState(339); _errHandler.sync(this); _la = _input.LA(1); while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << FULL) | (1L << INNER) | (1L << JOIN) | (1L << LEFT) | (1L << NATURAL) | (1L << RIGHT))) != 0)) { { { - setState(334); + setState(336); joinRelation(); } } - setState(339); + setState(341); _errHandler.sync(this); _la = _input.LA(1); } @@ -2281,7 +2281,7 @@ public final JoinRelationContext joinRelation() throws RecognitionException { enterRule(_localctx, 34, RULE_joinRelation); int _la; try { - setState(351); + setState(353); switch (_input.LA(1)) { case FULL: case INNER: @@ -2291,18 +2291,18 @@ public final JoinRelationContext joinRelation() throws RecognitionException { enterOuterAlt(_localctx, 1); { { - setState(340); + setState(342); joinType(); } - setState(341); + setState(343); match(JOIN); - setState(342); - ((JoinRelationContext)_localctx).right = relationPrimary(); setState(344); + ((JoinRelationContext)_localctx).right = relationPrimary(); + setState(346); _la = _input.LA(1); if (_la==ON || _la==USING) { { - setState(343); + setState(345); joinCriteria(); } } @@ -2312,13 +2312,13 @@ public final JoinRelationContext joinRelation() throws RecognitionException { case NATURAL: enterOuterAlt(_localctx, 2); { - setState(346); + setState(348); match(NATURAL); - setState(347); + setState(349); joinType(); - setState(348); + setState(350); match(JOIN); - setState(349); + setState(351); ((JoinRelationContext)_localctx).right = relationPrimary(); } break; @@ -2367,17 +2367,17 @@ public final JoinTypeContext joinType() throws RecognitionException { enterRule(_localctx, 36, RULE_joinType); int _la; try { - setState(368); + setState(370); switch (_input.LA(1)) { case INNER: case JOIN: enterOuterAlt(_localctx, 1); { - setState(354); + setState(356); _la = _input.LA(1); if (_la==INNER) { { - setState(353); + setState(355); match(INNER); } } @@ -2387,13 +2387,13 @@ public final JoinTypeContext joinType() throws RecognitionException { case LEFT: enterOuterAlt(_localctx, 2); { - setState(356); - match(LEFT); setState(358); + match(LEFT); + setState(360); _la = _input.LA(1); if (_la==OUTER) { { - setState(357); + setState(359); match(OUTER); } } @@ -2403,13 +2403,13 @@ public final JoinTypeContext joinType() throws RecognitionException { case RIGHT: enterOuterAlt(_localctx, 3); { - setState(360); - match(RIGHT); setState(362); + match(RIGHT); + setState(364); _la = 
_input.LA(1); if (_la==OUTER) { { - setState(361); + setState(363); match(OUTER); } } @@ -2419,13 +2419,13 @@ public final JoinTypeContext joinType() throws RecognitionException { case FULL: enterOuterAlt(_localctx, 4); { - setState(364); - match(FULL); setState(366); + match(FULL); + setState(368); _la = _input.LA(1); if (_la==OUTER) { { - setState(365); + setState(367); match(OUTER); } } @@ -2483,43 +2483,43 @@ public final JoinCriteriaContext joinCriteria() throws RecognitionException { enterRule(_localctx, 38, RULE_joinCriteria); int _la; try { - setState(384); + setState(386); switch (_input.LA(1)) { case ON: enterOuterAlt(_localctx, 1); { - setState(370); + setState(372); match(ON); - setState(371); + setState(373); booleanExpression(0); } break; case USING: enterOuterAlt(_localctx, 2); { - setState(372); + setState(374); match(USING); - setState(373); + setState(375); match(T__0); - setState(374); + setState(376); identifier(); - setState(379); + setState(381); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(375); + setState(377); match(T__2); - setState(376); + setState(378); identifier(); } } - setState(381); + setState(383); _errHandler.sync(this); _la = _input.LA(1); } - setState(382); + setState(384); match(T__1); } break; @@ -2624,29 +2624,29 @@ public final RelationPrimaryContext relationPrimary() throws RecognitionExceptio enterRule(_localctx, 40, RULE_relationPrimary); int _la; try { - setState(411); + setState(413); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,56,_ctx) ) { case 1: _localctx = new TableNameContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(386); + setState(388); tableIdentifier(); - setState(391); + setState(393); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(388); + setState(390); _la = _input.LA(1); if (_la==AS) { { - setState(387); + setState(389); match(AS); } } - setState(390); + setState(392); qualifiedName(); } } @@ -2657,26 +2657,26 @@ public final RelationPrimaryContext relationPrimary() throws RecognitionExceptio _localctx = new AliasedQueryContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(393); + setState(395); match(T__0); - setState(394); + setState(396); queryNoWith(); - setState(395); + setState(397); match(T__1); - setState(400); + setState(402); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT 
- 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(397); + setState(399); _la = _input.LA(1); if (_la==AS) { { - setState(396); + setState(398); match(AS); } } - setState(399); + setState(401); qualifiedName(); } } @@ -2687,26 +2687,26 @@ public final RelationPrimaryContext relationPrimary() throws RecognitionExceptio _localctx = new AliasedRelationContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(402); + setState(404); match(T__0); - setState(403); + setState(405); relation(); - setState(404); + setState(406); match(T__1); - setState(409); + setState(411); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(406); + setState(408); _la = _input.LA(1); if (_la==AS) { { - setState(405); + setState(407); match(AS); } } - setState(408); + setState(410); qualifiedName(); } } @@ -2755,7 +2755,7 @@ public final ExpressionContext expression() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(413); + setState(415); booleanExpression(0); } } @@ -2803,13 +2803,12 @@ public T accept(ParseTreeVisitor visitor) { } public static class StringQueryContext extends BooleanExpressionContext { public StringContext queryString; - public StringContext options; public TerminalNode QUERY() { return getToken(SqlBaseParser.QUERY, 0); } - public List string() { - return getRuleContexts(StringContext.class); + public MatchQueryOptionsContext matchQueryOptions() { + return getRuleContext(MatchQueryOptionsContext.class,0); } - public StringContext string(int i) { - return getRuleContext(StringContext.class,i); + public StringContext string() { + return getRuleContext(StringContext.class,0); } public StringQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } @Override @@ -2868,8 +2867,10 @@ public T accept(ParseTreeVisitor visitor) { public static class MultiMatchQueryContext extends BooleanExpressionContext { public StringContext multiFields; public StringContext queryString; - public StringContext options; public TerminalNode MATCH() { return getToken(SqlBaseParser.MATCH, 0); } + public MatchQueryOptionsContext matchQueryOptions() { + return getRuleContext(MatchQueryOptionsContext.class,0); + } public List string() { return getRuleContexts(StringContext.class); } @@ -2894,16 +2895,15 @@ public T accept(ParseTreeVisitor visitor) { public static class MatchQueryContext extends BooleanExpressionContext { public QualifiedNameContext singleField; public StringContext queryString; - public StringContext options; public TerminalNode MATCH() { return getToken(SqlBaseParser.MATCH, 0); } + public MatchQueryOptionsContext matchQueryOptions() { + return getRuleContext(MatchQueryOptionsContext.class,0); + } 
public QualifiedNameContext qualifiedName() { return getRuleContext(QualifiedNameContext.class,0); } - public List string() { - return getRuleContexts(StringContext.class); - } - public StringContext string(int i) { - return getRuleContext(StringContext.class,i); + public StringContext string() { + return getRuleContext(StringContext.class,0); } public MatchQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } @Override @@ -2959,23 +2959,22 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc BooleanExpressionContext _prevctx = _localctx; int _startState = 44; enterRecursionRule(_localctx, 44, RULE_booleanExpression, _p); - int _la; try { int _alt; enterOuterAlt(_localctx, 1); { - setState(464); + setState(448); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,60,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,57,_ctx) ) { case 1: { _localctx = new LogicalNotContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(416); + setState(418); match(NOT); - setState(417); + setState(419); booleanExpression(8); } break; @@ -2984,13 +2983,13 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new ExistsContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(418); + setState(420); match(EXISTS); - setState(419); + setState(421); match(T__0); - setState(420); + setState(422); query(); - setState(421); + setState(423); match(T__1); } break; @@ -2999,29 +2998,15 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new StringQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(423); + setState(425); match(QUERY); - setState(424); + setState(426); match(T__0); - setState(425); + setState(427); ((StringQueryContext)_localctx).queryString = string(); - setState(430); - _errHandler.sync(this); - _la = _input.LA(1); - while (_la==T__2) { - { - { - setState(426); - match(T__2); - setState(427); - ((StringQueryContext)_localctx).options = string(); - } - } - setState(432); - _errHandler.sync(this); - _la = _input.LA(1); - } - setState(433); + setState(428); + matchQueryOptions(); + setState(429); match(T__1); } break; @@ -3030,33 +3015,19 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new MatchQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(435); + setState(431); match(MATCH); - setState(436); + setState(432); match(T__0); - setState(437); + setState(433); ((MatchQueryContext)_localctx).singleField = qualifiedName(); - setState(438); + setState(434); match(T__2); - setState(439); + setState(435); ((MatchQueryContext)_localctx).queryString = string(); - setState(444); - _errHandler.sync(this); - _la = _input.LA(1); - while (_la==T__2) { - { - { - setState(440); - match(T__2); - setState(441); - ((MatchQueryContext)_localctx).options = string(); - } - } - setState(446); - _errHandler.sync(this); - _la = _input.LA(1); - } - setState(447); + setState(436); + matchQueryOptions(); + setState(437); match(T__1); } break; @@ -3065,33 +3036,19 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new MultiMatchQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(449); + setState(439); match(MATCH); - setState(450); + setState(440); match(T__0); - setState(451); + setState(441); ((MultiMatchQueryContext)_localctx).multiFields = string(); - setState(452); + 
setState(442); match(T__2); - setState(453); + setState(443); ((MultiMatchQueryContext)_localctx).queryString = string(); - setState(458); - _errHandler.sync(this); - _la = _input.LA(1); - while (_la==T__2) { - { - { - setState(454); - match(T__2); - setState(455); - ((MultiMatchQueryContext)_localctx).options = string(); - } - } - setState(460); - _errHandler.sync(this); - _la = _input.LA(1); - } - setState(461); + setState(444); + matchQueryOptions(); + setState(445); match(T__1); } break; @@ -3100,33 +3057,33 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new BooleanDefaultContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(463); + setState(447); predicated(); } break; } _ctx.stop = _input.LT(-1); - setState(474); + setState(458); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,62,_ctx); + _alt = getInterpreter().adaptivePredict(_input,59,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(472); + setState(456); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,61,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,58,_ctx) ) { case 1: { _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(466); + setState(450); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(467); + setState(451); ((LogicalBinaryContext)_localctx).operator = match(AND); - setState(468); + setState(452); ((LogicalBinaryContext)_localctx).right = booleanExpression(3); } break; @@ -3135,20 +3092,20 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(469); + setState(453); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(470); + setState(454); ((LogicalBinaryContext)_localctx).operator = match(OR); - setState(471); + setState(455); ((LogicalBinaryContext)_localctx).right = booleanExpression(2); } break; } } } - setState(476); + setState(460); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,62,_ctx); + _alt = getInterpreter().adaptivePredict(_input,59,_ctx); } } } @@ -3163,6 +3120,68 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc return _localctx; } + public static class MatchQueryOptionsContext extends ParserRuleContext { + public List string() { + return getRuleContexts(StringContext.class); + } + public StringContext string(int i) { + return getRuleContext(StringContext.class,i); + } + public MatchQueryOptionsContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_matchQueryOptions; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterMatchQueryOptions(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitMatchQueryOptions(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitMatchQueryOptions(this); + else return visitor.visitChildren(this); + } + } + + public final MatchQueryOptionsContext matchQueryOptions() throws RecognitionException { + MatchQueryOptionsContext _localctx = new MatchQueryOptionsContext(_ctx, getState()); + enterRule(_localctx, 46, RULE_matchQueryOptions); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(465); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(461); + match(T__2); + setState(462); + string(); + } + } + setState(467); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PredicatedContext extends ParserRuleContext { public ValueExpressionContext valueExpression() { return getRuleContext(ValueExpressionContext.class,0); @@ -3191,18 +3210,18 @@ public T accept(ParseTreeVisitor visitor) { public final PredicatedContext predicated() throws RecognitionException { PredicatedContext _localctx = new PredicatedContext(_ctx, getState()); - enterRule(_localctx, 46, RULE_predicated); + enterRule(_localctx, 48, RULE_predicated); try { enterOuterAlt(_localctx, 1); { - setState(477); + setState(468); valueExpression(0); - setState(479); + setState(470); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,63,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,61,_ctx) ) { case 1: { - setState(478); + setState(469); predicate(); } break; @@ -3275,145 +3294,145 @@ public T accept(ParseTreeVisitor visitor) { public final PredicateContext predicate() throws RecognitionException { PredicateContext _localctx = new PredicateContext(_ctx, getState()); - enterRule(_localctx, 48, RULE_predicate); + enterRule(_localctx, 50, RULE_predicate); int _la; try { - setState(527); + setState(518); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,71,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,69,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(482); + setState(473); _la = _input.LA(1); if (_la==NOT) { { - setState(481); + setState(472); match(NOT); } } - setState(484); + setState(475); ((PredicateContext)_localctx).kind = match(BETWEEN); - setState(485); + setState(476); ((PredicateContext)_localctx).lower = valueExpression(0); - setState(486); + setState(477); match(AND); - setState(487); + setState(478); ((PredicateContext)_localctx).upper = valueExpression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(490); + setState(481); _la = _input.LA(1); if (_la==NOT) { { - setState(489); + setState(480); match(NOT); } } - setState(492); + setState(483); ((PredicateContext)_localctx).kind = match(IN); - setState(493); + setState(484); match(T__0); - setState(494); + setState(485); expression(); - setState(499); + setState(490); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(495); + setState(486); match(T__2); - setState(496); + setState(487); expression(); } } - setState(501); + setState(492); _errHandler.sync(this); _la = _input.LA(1); } - setState(502); + setState(493); match(T__1); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(505); + setState(496); _la = 
_input.LA(1); if (_la==NOT) { { - setState(504); + setState(495); match(NOT); } } - setState(507); + setState(498); ((PredicateContext)_localctx).kind = match(IN); - setState(508); + setState(499); match(T__0); - setState(509); + setState(500); query(); - setState(510); + setState(501); match(T__1); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(513); + setState(504); _la = _input.LA(1); if (_la==NOT) { { - setState(512); + setState(503); match(NOT); } } - setState(515); + setState(506); ((PredicateContext)_localctx).kind = match(LIKE); - setState(516); + setState(507); pattern(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(518); + setState(509); _la = _input.LA(1); if (_la==NOT) { { - setState(517); + setState(508); match(NOT); } } - setState(520); + setState(511); ((PredicateContext)_localctx).kind = match(RLIKE); - setState(521); + setState(512); ((PredicateContext)_localctx).regex = string(); } break; case 6: enterOuterAlt(_localctx, 6); { - setState(522); + setState(513); match(IS); - setState(524); + setState(515); _la = _input.LA(1); if (_la==NOT) { { - setState(523); + setState(514); match(NOT); } } - setState(526); + setState(517); ((PredicateContext)_localctx).kind = match(NULL); } break; @@ -3456,13 +3475,13 @@ public T accept(ParseTreeVisitor visitor) { public final LikePatternContext likePattern() throws RecognitionException { LikePatternContext _localctx = new LikePatternContext(_ctx, getState()); - enterRule(_localctx, 50, RULE_likePattern); + enterRule(_localctx, 52, RULE_likePattern); try { enterOuterAlt(_localctx, 1); { - setState(529); + setState(520); match(LIKE); - setState(530); + setState(521); pattern(); } } @@ -3506,18 +3525,18 @@ public T accept(ParseTreeVisitor visitor) { public final PatternContext pattern() throws RecognitionException { PatternContext _localctx = new PatternContext(_ctx, getState()); - enterRule(_localctx, 52, RULE_pattern); + enterRule(_localctx, 54, RULE_pattern); try { enterOuterAlt(_localctx, 1); { - setState(532); + setState(523); ((PatternContext)_localctx).value = string(); - setState(534); + setState(525); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,70,_ctx) ) { case 1: { - setState(533); + setState(524); patternEscape(); } break; @@ -3563,27 +3582,27 @@ public T accept(ParseTreeVisitor visitor) { public final PatternEscapeContext patternEscape() throws RecognitionException { PatternEscapeContext _localctx = new PatternEscapeContext(_ctx, getState()); - enterRule(_localctx, 54, RULE_patternEscape); + enterRule(_localctx, 56, RULE_patternEscape); try { - setState(542); + setState(533); switch (_input.LA(1)) { case ESCAPE: enterOuterAlt(_localctx, 1); { - setState(536); + setState(527); match(ESCAPE); - setState(537); + setState(528); ((PatternEscapeContext)_localctx).escape = string(); } break; case ESCAPE_ESC: enterOuterAlt(_localctx, 2); { - setState(538); + setState(529); match(ESCAPE_ESC); - setState(539); + setState(530); ((PatternEscapeContext)_localctx).escape = string(); - setState(540); + setState(531); match(ESC_END); } break; @@ -3721,23 +3740,23 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti int _parentState = getState(); ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, _parentState); ValueExpressionContext _prevctx = _localctx; - int _startState = 56; - enterRecursionRule(_localctx, 56, RULE_valueExpression, _p); + int _startState = 58; + 
enterRecursionRule(_localctx, 58, RULE_valueExpression, _p); int _la; try { int _alt; enterOuterAlt(_localctx, 1); { - setState(548); + setState(539); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,74,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) { case 1: { _localctx = new ValueExpressionDefaultContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(545); + setState(536); primaryExpression(); } break; @@ -3746,7 +3765,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti _localctx = new ArithmeticUnaryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(546); + setState(537); ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -3754,31 +3773,31 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti } else { consume(); } - setState(547); + setState(538); valueExpression(4); } break; } _ctx.stop = _input.LT(-1); - setState(562); + setState(553); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,76,_ctx); + _alt = getInterpreter().adaptivePredict(_input,74,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(560); + setState(551); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,75,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,73,_ctx) ) { case 1: { _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); - setState(550); + setState(541); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(551); + setState(542); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(((((_la - 88)) & ~0x3f) == 0 && ((1L << (_la - 88)) & ((1L << (ASTERISK - 88)) | (1L << (SLASH - 88)) | (1L << (PERCENT - 88)))) != 0)) ) { @@ -3786,7 +3805,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti } else { consume(); } - setState(552); + setState(543); ((ArithmeticBinaryContext)_localctx).right = valueExpression(4); } break; @@ -3795,9 +3814,9 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); - setState(553); + setState(544); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(554); + setState(545); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -3805,7 +3824,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti } else { consume(); } - setState(555); + setState(546); ((ArithmeticBinaryContext)_localctx).right = valueExpression(3); } break; @@ -3814,20 +3833,20 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti _localctx = new ComparisonContext(new ValueExpressionContext(_parentctx, _parentState)); ((ComparisonContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, 
RULE_valueExpression); - setState(556); + setState(547); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(557); + setState(548); comparisonOperator(); - setState(558); + setState(549); ((ComparisonContext)_localctx).right = valueExpression(2); } break; } } } - setState(564); + setState(555); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,76,_ctx); + _alt = getInterpreter().adaptivePredict(_input,74,_ctx); } } } @@ -4029,17 +4048,17 @@ public T accept(ParseTreeVisitor visitor) { public final PrimaryExpressionContext primaryExpression() throws RecognitionException { PrimaryExpressionContext _localctx = new PrimaryExpressionContext(_ctx, getState()); - enterRule(_localctx, 58, RULE_primaryExpression); + enterRule(_localctx, 60, RULE_primaryExpression); int _la; try { - setState(586); + setState(577); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,78,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,76,_ctx) ) { case 1: _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(565); + setState(556); castExpression(); } break; @@ -4047,7 +4066,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new ExtractContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(566); + setState(557); extractExpression(); } break; @@ -4055,7 +4074,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new ConstantDefaultContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(567); + setState(558); constant(); } break; @@ -4063,7 +4082,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new StarContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(568); + setState(559); match(ASTERISK); } break; @@ -4071,18 +4090,18 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new StarContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(572); + setState(563); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(569); + setState(560); qualifiedName(); - setState(570); + setState(561); match(DOT); } } - setState(574); + setState(565); match(ASTERISK); } break; @@ -4090,7 +4109,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new FunctionContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(575); + setState(566); functionExpression(); } break; @@ -4098,11 +4117,11 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new SubqueryExpressionContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(576); + setState(567); match(T__0); - setState(577); + setState(568); query(); - setState(578); + 
setState(569); match(T__1); } break; @@ -4110,7 +4129,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new ColumnReferenceContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(580); + setState(571); identifier(); } break; @@ -4118,7 +4137,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new DereferenceContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(581); + setState(572); qualifiedName(); } break; @@ -4126,11 +4145,11 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce _localctx = new ParenthesizedExpressionContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(582); + setState(573); match(T__0); - setState(583); + setState(574); expression(); - setState(584); + setState(575); match(T__1); } break; @@ -4174,25 +4193,25 @@ public T accept(ParseTreeVisitor visitor) { public final CastExpressionContext castExpression() throws RecognitionException { CastExpressionContext _localctx = new CastExpressionContext(_ctx, getState()); - enterRule(_localctx, 60, RULE_castExpression); + enterRule(_localctx, 62, RULE_castExpression); try { - setState(593); + setState(584); switch (_input.LA(1)) { case CAST: enterOuterAlt(_localctx, 1); { - setState(588); + setState(579); castTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(589); + setState(580); match(FUNCTION_ESC); - setState(590); + setState(581); castTemplate(); - setState(591); + setState(582); match(ESC_END); } break; @@ -4241,21 +4260,21 @@ public T accept(ParseTreeVisitor visitor) { public final CastTemplateContext castTemplate() throws RecognitionException { CastTemplateContext _localctx = new CastTemplateContext(_ctx, getState()); - enterRule(_localctx, 62, RULE_castTemplate); + enterRule(_localctx, 64, RULE_castTemplate); try { enterOuterAlt(_localctx, 1); { - setState(595); + setState(586); match(CAST); - setState(596); + setState(587); match(T__0); - setState(597); + setState(588); expression(); - setState(598); + setState(589); match(AS); - setState(599); + setState(590); dataType(); - setState(600); + setState(591); match(T__1); } } @@ -4297,25 +4316,25 @@ public T accept(ParseTreeVisitor visitor) { public final ExtractExpressionContext extractExpression() throws RecognitionException { ExtractExpressionContext _localctx = new ExtractExpressionContext(_ctx, getState()); - enterRule(_localctx, 64, RULE_extractExpression); + enterRule(_localctx, 66, RULE_extractExpression); try { - setState(607); + setState(598); switch (_input.LA(1)) { case EXTRACT: enterOuterAlt(_localctx, 1); { - setState(602); + setState(593); extractTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(603); + setState(594); match(FUNCTION_ESC); - setState(604); + setState(595); extractTemplate(); - setState(605); + setState(596); match(ESC_END); } break; @@ -4365,21 +4384,21 @@ public T accept(ParseTreeVisitor visitor) { public final ExtractTemplateContext extractTemplate() throws RecognitionException { ExtractTemplateContext _localctx = new ExtractTemplateContext(_ctx, getState()); - enterRule(_localctx, 66, RULE_extractTemplate); + enterRule(_localctx, 68, RULE_extractTemplate); try { enterOuterAlt(_localctx, 1); { - setState(609); + setState(600); match(EXTRACT); - setState(610); + setState(601); match(T__0); - setState(611); + setState(602); ((ExtractTemplateContext)_localctx).field = identifier(); - setState(612); + setState(603); match(FROM); - 
setState(613); + setState(604); valueExpression(0); - setState(614); + setState(605); match(T__1); } } @@ -4420,9 +4439,9 @@ public T accept(ParseTreeVisitor visitor) { public final FunctionExpressionContext functionExpression() throws RecognitionException { FunctionExpressionContext _localctx = new FunctionExpressionContext(_ctx, getState()); - enterRule(_localctx, 68, RULE_functionExpression); + enterRule(_localctx, 70, RULE_functionExpression); try { - setState(621); + setState(612); switch (_input.LA(1)) { case ANALYZE: case ANALYZED: @@ -4457,18 +4476,18 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(616); + setState(607); functionTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(617); + setState(608); match(FUNCTION_ESC); - setState(618); + setState(609); functionTemplate(); - setState(619); + setState(610); match(ESC_END); } break; @@ -4521,50 +4540,50 @@ public T accept(ParseTreeVisitor visitor) { public final FunctionTemplateContext functionTemplate() throws RecognitionException { FunctionTemplateContext _localctx = new FunctionTemplateContext(_ctx, getState()); - enterRule(_localctx, 70, RULE_functionTemplate); + enterRule(_localctx, 72, RULE_functionTemplate); int _la; try { enterOuterAlt(_localctx, 1); { - setState(623); + setState(614); functionName(); - setState(624); + setState(615); match(T__0); - setState(636); + setState(627); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LEFT) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RIGHT) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (FUNCTION_ESC - 64)) | (1L << (DATE_ESC - 64)) | (1L << (TIME_ESC - 64)) | (1L << (TIMESTAMP_ESC - 64)) | (1L << (GUID_ESC - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(626); + setState(617); _la = _input.LA(1); if (_la==ALL || _la==DISTINCT) { { - setState(625); + setState(616); setQuantifier(); } } - setState(628); + setState(619); expression(); - setState(633); + setState(624); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(629); + setState(620); match(T__2); - setState(630); + setState(621); expression(); } } - setState(635); + setState(626); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(638); + setState(629); match(T__1); } } @@ -4606,21 +4625,21 @@ public T accept(ParseTreeVisitor visitor) { public final FunctionNameContext functionName() throws RecognitionException { FunctionNameContext _localctx = new FunctionNameContext(_ctx, getState()); - enterRule(_localctx, 72, 
RULE_functionName); + enterRule(_localctx, 74, RULE_functionName); try { - setState(643); + setState(634); switch (_input.LA(1)) { case LEFT: enterOuterAlt(_localctx, 1); { - setState(640); + setState(631); match(LEFT); } break; case RIGHT: enterOuterAlt(_localctx, 2); { - setState(641); + setState(632); match(RIGHT); } break; @@ -4655,7 +4674,7 @@ public final FunctionNameContext functionName() throws RecognitionException { case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 3); { - setState(642); + setState(633); identifier(); } break; @@ -4864,16 +4883,16 @@ public T accept(ParseTreeVisitor visitor) { public final ConstantContext constant() throws RecognitionException { ConstantContext _localctx = new ConstantContext(_ctx, getState()); - enterRule(_localctx, 74, RULE_constant); + enterRule(_localctx, 76, RULE_constant); try { int _alt; - setState(670); + setState(661); switch (_input.LA(1)) { case NULL: _localctx = new NullLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(645); + setState(636); match(NULL); } break; @@ -4884,7 +4903,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new NumericLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(646); + setState(637); number(); } break; @@ -4893,7 +4912,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new BooleanLiteralContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(647); + setState(638); booleanValue(); } break; @@ -4901,7 +4920,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new StringLiteralContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(649); + setState(640); _errHandler.sync(this); _alt = 1; do { @@ -4909,7 +4928,7 @@ public final ConstantContext constant() throws RecognitionException { case 1: { { - setState(648); + setState(639); match(STRING); } } @@ -4917,9 +4936,9 @@ public final ConstantContext constant() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(651); + setState(642); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,86,_ctx); + _alt = getInterpreter().adaptivePredict(_input,84,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; @@ -4927,7 +4946,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new ParamLiteralContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(653); + setState(644); match(PARAM); } break; @@ -4935,11 +4954,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new DateEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(654); + setState(645); match(DATE_ESC); - setState(655); + setState(646); string(); - setState(656); + setState(647); match(ESC_END); } break; @@ -4947,11 +4966,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new TimeEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(658); + setState(649); match(TIME_ESC); - setState(659); + setState(650); string(); - setState(660); + setState(651); match(ESC_END); } break; @@ -4959,11 +4978,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new TimestampEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(662); + setState(653); match(TIMESTAMP_ESC); - setState(663); + setState(654); string(); - setState(664); + setState(655); match(ESC_END); } 
break; @@ -4971,11 +4990,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new GuidEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(666); + setState(657); match(GUID_ESC); - setState(667); + setState(658); string(); - setState(668); + setState(659); match(ESC_END); } break; @@ -5022,12 +5041,12 @@ public T accept(ParseTreeVisitor visitor) { public final ComparisonOperatorContext comparisonOperator() throws RecognitionException { ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState()); - enterRule(_localctx, 76, RULE_comparisonOperator); + enterRule(_localctx, 78, RULE_comparisonOperator); int _la; try { enterOuterAlt(_localctx, 1); { - setState(672); + setState(663); _la = _input.LA(1); if ( !(((((_la - 80)) & ~0x3f) == 0 && ((1L << (_la - 80)) & ((1L << (EQ - 80)) | (1L << (NEQ - 80)) | (1L << (LT - 80)) | (1L << (LTE - 80)) | (1L << (GT - 80)) | (1L << (GTE - 80)))) != 0)) ) { _errHandler.recoverInline(this); @@ -5071,12 +5090,12 @@ public T accept(ParseTreeVisitor visitor) { public final BooleanValueContext booleanValue() throws RecognitionException { BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState()); - enterRule(_localctx, 78, RULE_booleanValue); + enterRule(_localctx, 80, RULE_booleanValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(674); + setState(665); _la = _input.LA(1); if ( !(_la==FALSE || _la==TRUE) ) { _errHandler.recoverInline(this); @@ -5129,12 +5148,12 @@ public T accept(ParseTreeVisitor visitor) { public final DataTypeContext dataType() throws RecognitionException { DataTypeContext _localctx = new DataTypeContext(_ctx, getState()); - enterRule(_localctx, 80, RULE_dataType); + enterRule(_localctx, 82, RULE_dataType); try { _localctx = new PrimitiveDataTypeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(676); + setState(667); identifier(); } } @@ -5181,30 +5200,30 @@ public T accept(ParseTreeVisitor visitor) { public final QualifiedNameContext qualifiedName() throws RecognitionException { QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState()); - enterRule(_localctx, 82, RULE_qualifiedName); + enterRule(_localctx, 84, RULE_qualifiedName); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(683); + setState(674); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,88,_ctx); + _alt = getInterpreter().adaptivePredict(_input,86,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(678); + setState(669); identifier(); - setState(679); + setState(670); match(DOT); } } } - setState(685); + setState(676); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,88,_ctx); + _alt = getInterpreter().adaptivePredict(_input,86,_ctx); } - setState(686); + setState(677); identifier(); } } @@ -5247,15 +5266,15 @@ public T accept(ParseTreeVisitor visitor) { public final IdentifierContext identifier() throws RecognitionException { IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); - enterRule(_localctx, 84, RULE_identifier); + enterRule(_localctx, 86, RULE_identifier); try { - setState(690); + setState(681); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(688); + setState(679); quoteIdentifier(); } break; @@ -5288,7 +5307,7 @@ public final IdentifierContext identifier() throws RecognitionException { case DIGIT_IDENTIFIER: 
enterOuterAlt(_localctx, 2); { - setState(689); + setState(680); unquoteIdentifier(); } break; @@ -5338,46 +5357,46 @@ public T accept(ParseTreeVisitor visitor) { public final TableIdentifierContext tableIdentifier() throws RecognitionException { TableIdentifierContext _localctx = new TableIdentifierContext(_ctx, getState()); - enterRule(_localctx, 86, RULE_tableIdentifier); + enterRule(_localctx, 88, RULE_tableIdentifier); int _la; try { - setState(704); + setState(695); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,92,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,90,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(695); + setState(686); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { { - setState(692); + setState(683); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(693); + setState(684); match(T__3); } } - setState(697); + setState(688); match(TABLE_IDENTIFIER); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(701); + setState(692); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,91,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,89,_ctx) ) { case 1: { - setState(698); + setState(689); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(699); + setState(690); match(T__3); } break; } - setState(703); + setState(694); ((TableIdentifierContext)_localctx).name = identifier(); } break; @@ -5442,15 +5461,15 @@ public T accept(ParseTreeVisitor visitor) { public final QuoteIdentifierContext quoteIdentifier() throws RecognitionException { QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState()); - enterRule(_localctx, 88, RULE_quoteIdentifier); + enterRule(_localctx, 90, RULE_quoteIdentifier); try { - setState(708); + setState(699); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: _localctx = new QuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(706); + setState(697); match(QUOTED_IDENTIFIER); } break; @@ -5458,7 +5477,7 @@ public final QuoteIdentifierContext quoteIdentifier() throws RecognitionExceptio _localctx = new BackQuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(707); + setState(698); match(BACKQUOTED_IDENTIFIER); } break; @@ -5528,15 +5547,15 @@ public T accept(ParseTreeVisitor visitor) { public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionException { UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState()); - enterRule(_localctx, 90, RULE_unquoteIdentifier); + enterRule(_localctx, 92, RULE_unquoteIdentifier); try { - setState(713); + setState(704); switch (_input.LA(1)) { case IDENTIFIER: _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(710); + setState(701); match(IDENTIFIER); } break; @@ 
-5568,7 +5587,7 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(711); + setState(702); nonReserved(); } break; @@ -5576,7 +5595,7 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce _localctx = new DigitIdentifierContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(712); + setState(703); match(DIGIT_IDENTIFIER); } break; @@ -5647,21 +5666,21 @@ public T accept(ParseTreeVisitor visitor) { public final NumberContext number() throws RecognitionException { NumberContext _localctx = new NumberContext(_ctx, getState()); - enterRule(_localctx, 92, RULE_number); + enterRule(_localctx, 94, RULE_number); int _la; try { - setState(723); + setState(714); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,97,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,95,_ctx) ) { case 1: _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(716); + setState(707); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(715); + setState(706); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -5671,7 +5690,7 @@ public final NumberContext number() throws RecognitionException { } } - setState(718); + setState(709); match(DECIMAL_VALUE); } break; @@ -5679,11 +5698,11 @@ public final NumberContext number() throws RecognitionException { _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(720); + setState(711); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(719); + setState(710); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -5693,7 +5712,7 @@ public final NumberContext number() throws RecognitionException { } } - setState(722); + setState(713); match(INTEGER_VALUE); } break; @@ -5734,12 +5753,12 @@ public T accept(ParseTreeVisitor visitor) { public final StringContext string() throws RecognitionException { StringContext _localctx = new StringContext(_ctx, getState()); - enterRule(_localctx, 94, RULE_string); + enterRule(_localctx, 96, RULE_string); int _la; try { enterOuterAlt(_localctx, 1); { - setState(725); + setState(716); _la = _input.LA(1); if ( !(_la==PARAM || _la==STRING) ) { _errHandler.recoverInline(this); @@ -5806,12 +5825,12 @@ public T accept(ParseTreeVisitor visitor) { public final NonReservedContext nonReserved() throws RecognitionException { NonReservedContext _localctx = new NonReservedContext(_ctx, getState()); - enterRule(_localctx, 96, RULE_nonReserved); + enterRule(_localctx, 98, RULE_nonReserved); int _la; try { enterOuterAlt(_localctx, 1); { - setState(727); + setState(718); _la = _input.LA(1); if ( !(((((_la - 6)) & ~0x3f) == 0 && ((1L << (_la - 6)) & ((1L << (ANALYZE - 6)) | (1L << (ANALYZED - 6)) | (1L << (CATALOGS - 6)) | (1L << (COLUMNS - 6)) | (1L << (DEBUG - 6)) | (1L << (EXECUTABLE - 6)) | (1L << (EXPLAIN - 6)) | (1L << (FORMAT - 6)) | (1L << (FUNCTIONS - 6)) | (1L << (GRAPHVIZ - 6)) | (1L << (MAPPED - 6)) | (1L << (OPTIMIZED - 6)) | (1L << (PARSED - 6)) | (1L << (PHYSICAL - 6)) | (1L << (PLAN - 6)) | (1L << (RLIKE - 6)) | (1L << (QUERY - 6)) | (1L << (SCHEMAS - 6)) | (1L << (SHOW - 6)) | (1L << (SYS - 6)) | (1L << (TABLES - 6)) | (1L << (TEXT - 6)) | (1L << (TYPE - 6)) | (1L << (TYPES - 6)) | (1L << (VERIFY - 6)))) != 0)) ) { _errHandler.recoverInline(this); @@ -5835,7 +5854,7 @@ public boolean 
sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { case 22: return booleanExpression_sempred((BooleanExpressionContext)_localctx, predIndex); - case 28: + case 29: return valueExpression_sempred((ValueExpressionContext)_localctx, predIndex); } return true; @@ -5862,296 +5881,290 @@ private boolean valueExpression_sempred(ValueExpressionContext _localctx, int pr } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3l\u02dc\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3l\u02d3\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4"+ - ",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\3\2\3\2\3\2\3\3\3"+ - "\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4t\n\4\f\4\16\4w\13\4\3\4"+ - "\5\4z\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0083\n\4\f\4\16\4\u0086\13"+ - "\4\3\4\5\4\u0089\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u0090\n\4\3\4\3\4\3\4\3\4"+ - "\3\4\5\4\u0097\n\4\3\4\3\4\3\4\5\4\u009c\n\4\3\4\3\4\3\4\5\4\u00a1\n\4"+ - "\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00ab\n\4\3\4\3\4\5\4\u00af\n\4\3"+ - "\4\3\4\3\4\3\4\7\4\u00b5\n\4\f\4\16\4\u00b8\13\4\5\4\u00ba\n\4\3\4\3\4"+ - "\3\4\3\4\5\4\u00c0\n\4\3\4\3\4\3\4\5\4\u00c5\n\4\3\4\5\4\u00c8\n\4\3\4"+ - "\3\4\3\4\3\4\3\4\5\4\u00cf\n\4\3\5\3\5\3\5\3\5\7\5\u00d5\n\5\f\5\16\5"+ - "\u00d8\13\5\5\5\u00da\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\7\6\u00e4\n"+ - "\6\f\6\16\6\u00e7\13\6\5\6\u00e9\n\6\3\6\5\6\u00ec\n\6\3\7\3\7\3\7\3\7"+ - "\3\7\5\7\u00f3\n\7\3\b\3\b\3\b\3\b\3\b\5\b\u00fa\n\b\3\t\3\t\5\t\u00fe"+ - "\n\t\3\n\3\n\5\n\u0102\n\n\3\n\3\n\3\n\7\n\u0107\n\n\f\n\16\n\u010a\13"+ - "\n\3\n\5\n\u010d\n\n\3\n\3\n\5\n\u0111\n\n\3\n\3\n\3\n\5\n\u0116\n\n\3"+ - "\n\3\n\5\n\u011a\n\n\3\13\3\13\3\13\3\13\7\13\u0120\n\13\f\13\16\13\u0123"+ - "\13\13\3\f\5\f\u0126\n\f\3\f\3\f\3\f\7\f\u012b\n\f\f\f\16\f\u012e\13\f"+ - "\3\r\3\r\3\16\3\16\3\16\3\16\7\16\u0136\n\16\f\16\16\16\u0139\13\16\5"+ - "\16\u013b\n\16\3\16\3\16\5\16\u013f\n\16\3\17\3\17\3\17\3\17\3\17\3\17"+ - "\3\20\3\20\3\21\3\21\5\21\u014b\n\21\3\21\5\21\u014e\n\21\3\22\3\22\7"+ - "\22\u0152\n\22\f\22\16\22\u0155\13\22\3\23\3\23\3\23\3\23\5\23\u015b\n"+ - "\23\3\23\3\23\3\23\3\23\3\23\5\23\u0162\n\23\3\24\5\24\u0165\n\24\3\24"+ - "\3\24\5\24\u0169\n\24\3\24\3\24\5\24\u016d\n\24\3\24\3\24\5\24\u0171\n"+ - "\24\5\24\u0173\n\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\7\25\u017c\n\25"+ - "\f\25\16\25\u017f\13\25\3\25\3\25\5\25\u0183\n\25\3\26\3\26\5\26\u0187"+ - "\n\26\3\26\5\26\u018a\n\26\3\26\3\26\3\26\3\26\5\26\u0190\n\26\3\26\5"+ - "\26\u0193\n\26\3\26\3\26\3\26\3\26\5\26\u0199\n\26\3\26\5\26\u019c\n\26"+ - "\5\26\u019e\n\26\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ - "\3\30\3\30\3\30\3\30\7\30\u01af\n\30\f\30\16\30\u01b2\13\30\3\30\3\30"+ - "\3\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u01bd\n\30\f\30\16\30\u01c0\13"+ - "\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u01cb\n\30\f\30"+ - "\16\30\u01ce\13\30\3\30\3\30\3\30\5\30\u01d3\n\30\3\30\3\30\3\30\3\30"+ - "\3\30\3\30\7\30\u01db\n\30\f\30\16\30\u01de\13\30\3\31\3\31\5\31\u01e2"+ - "\n\31\3\32\5\32\u01e5\n\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u01ed\n"+ - "\32\3\32\3\32\3\32\3\32\3\32\7\32\u01f4\n\32\f\32\16\32\u01f7\13\32\3"+ - 
"\32\3\32\3\32\5\32\u01fc\n\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u0204"+ - "\n\32\3\32\3\32\3\32\5\32\u0209\n\32\3\32\3\32\3\32\3\32\5\32\u020f\n"+ - "\32\3\32\5\32\u0212\n\32\3\33\3\33\3\33\3\34\3\34\5\34\u0219\n\34\3\35"+ - "\3\35\3\35\3\35\3\35\3\35\5\35\u0221\n\35\3\36\3\36\3\36\3\36\5\36\u0227"+ - "\n\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\7\36\u0233\n\36"+ - "\f\36\16\36\u0236\13\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37\u023f"+ - "\n\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37"+ - "\u024d\n\37\3 \3 \3 \3 \3 \5 \u0254\n \3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3"+ - "\"\3\"\3\"\5\"\u0262\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\5$\u0270"+ - "\n$\3%\3%\3%\5%\u0275\n%\3%\3%\3%\7%\u027a\n%\f%\16%\u027d\13%\5%\u027f"+ - "\n%\3%\3%\3&\3&\3&\5&\u0286\n&\3\'\3\'\3\'\3\'\6\'\u028c\n\'\r\'\16\'"+ - "\u028d\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'"+ - "\3\'\5\'\u02a1\n\'\3(\3(\3)\3)\3*\3*\3+\3+\3+\7+\u02ac\n+\f+\16+\u02af"+ - "\13+\3+\3+\3,\3,\5,\u02b5\n,\3-\3-\3-\5-\u02ba\n-\3-\3-\3-\3-\5-\u02c0"+ - "\n-\3-\5-\u02c3\n-\3.\3.\5.\u02c7\n.\3/\3/\3/\5/\u02cc\n/\3\60\5\60\u02cf"+ - "\n\60\3\60\3\60\5\60\u02d3\n\60\3\60\5\60\u02d6\n\60\3\61\3\61\3\62\3"+ - "\62\3\62\2\4.:\63\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62"+ - "\64\668:<>@BDFHJLNPRTVXZ\\^`b\2\20\b\2\7\7\t\t\31\31,,\62\62\66\66\4\2"+ - "\"\"BB\4\2\t\t\62\62\4\2\37\37%%\3\2\25\26\4\2\7\7aa\4\2\r\r\25\25\4\2"+ - "\7\7\27\27\3\2XY\3\2Z\\\3\2RW\4\2\35\35CC\3\2_`\20\2\b\t\22\24\31\31\33"+ - "\33\36\36!\",,\62\62\668:<>?ABDEGG\u0338\2d\3\2\2\2\4g\3\2\2\2\6\u00ce"+ - "\3\2\2\2\b\u00d9\3\2\2\2\n\u00dd\3\2\2\2\f\u00f2\3\2\2\2\16\u00f9\3\2"+ - "\2\2\20\u00fb\3\2\2\2\22\u00ff\3\2\2\2\24\u011b\3\2\2\2\26\u0125\3\2\2"+ - "\2\30\u012f\3\2\2\2\32\u013e\3\2\2\2\34\u0140\3\2\2\2\36\u0146\3\2\2\2"+ - " \u0148\3\2\2\2\"\u014f\3\2\2\2$\u0161\3\2\2\2&\u0172\3\2\2\2(\u0182\3"+ - "\2\2\2*\u019d\3\2\2\2,\u019f\3\2\2\2.\u01d2\3\2\2\2\60\u01df\3\2\2\2\62"+ - "\u0211\3\2\2\2\64\u0213\3\2\2\2\66\u0216\3\2\2\28\u0220\3\2\2\2:\u0226"+ - "\3\2\2\2<\u024c\3\2\2\2>\u0253\3\2\2\2@\u0255\3\2\2\2B\u0261\3\2\2\2D"+ - "\u0263\3\2\2\2F\u026f\3\2\2\2H\u0271\3\2\2\2J\u0285\3\2\2\2L\u02a0\3\2"+ - "\2\2N\u02a2\3\2\2\2P\u02a4\3\2\2\2R\u02a6\3\2\2\2T\u02ad\3\2\2\2V\u02b4"+ - "\3\2\2\2X\u02c2\3\2\2\2Z\u02c6\3\2\2\2\\\u02cb\3\2\2\2^\u02d5\3\2\2\2"+ - "`\u02d7\3\2\2\2b\u02d9\3\2\2\2de\5\6\4\2ef\7\2\2\3f\3\3\2\2\2gh\5,\27"+ - "\2hi\7\2\2\3i\5\3\2\2\2j\u00cf\5\b\5\2ky\7\33\2\2lu\7\3\2\2mn\78\2\2n"+ - "t\t\2\2\2op\7\36\2\2pt\t\3\2\2qr\7G\2\2rt\5P)\2sm\3\2\2\2so\3\2\2\2sq"+ - "\3\2\2\2tw\3\2\2\2us\3\2\2\2uv\3\2\2\2vx\3\2\2\2wu\3\2\2\2xz\7\4\2\2y"+ - "l\3\2\2\2yz\3\2\2\2z{\3\2\2\2{\u00cf\5\6\4\2|\u0088\7\24\2\2}\u0084\7"+ - "\3\2\2~\177\78\2\2\177\u0083\t\4\2\2\u0080\u0081\7\36\2\2\u0081\u0083"+ - "\t\3\2\2\u0082~\3\2\2\2\u0082\u0080\3\2\2\2\u0083\u0086\3\2\2\2\u0084"+ - "\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u0087\3\2\2\2\u0086\u0084\3\2"+ - "\2\2\u0087\u0089\7\4\2\2\u0088}\3\2\2\2\u0088\u0089\3\2\2\2\u0089\u008a"+ - "\3\2\2\2\u008a\u00cf\5\6\4\2\u008b\u008c\7>\2\2\u008c\u008f\7A\2\2\u008d"+ - "\u0090\5\64\33\2\u008e\u0090\5X-\2\u008f\u008d\3\2\2\2\u008f\u008e\3\2"+ - "\2\2\u008f\u0090\3\2\2\2\u0090\u00cf\3\2\2\2\u0091\u0092\7>\2\2\u0092"+ - "\u0093\7\23\2\2\u0093\u0096\t\5\2\2\u0094\u0097\5\64\33\2\u0095\u0097"+ - "\5X-\2\u0096\u0094\3\2\2\2\u0096\u0095\3\2\2\2\u0097\u00cf\3\2\2\2\u0098"+ - "\u009b\t\6\2\2\u0099\u009c\5\64\33\2\u009a\u009c\5X-\2\u009b\u0099\3\2"+ - 
"\2\2\u009b\u009a\3\2\2\2\u009c\u00cf\3\2\2\2\u009d\u009e\7>\2\2\u009e"+ - "\u00a0\7!\2\2\u009f\u00a1\5\64\33\2\u00a0\u009f\3\2\2\2\u00a0\u00a1\3"+ - "\2\2\2\u00a1\u00cf\3\2\2\2\u00a2\u00a3\7>\2\2\u00a3\u00cf\7<\2\2\u00a4"+ - "\u00a5\7?\2\2\u00a5\u00cf\7\22\2\2\u00a6\u00a7\7?\2\2\u00a7\u00aa\7A\2"+ - "\2\u00a8\u00a9\7\21\2\2\u00a9\u00ab\5\64\33\2\u00aa\u00a8\3\2\2\2\u00aa"+ - "\u00ab\3\2\2\2\u00ab\u00ae\3\2\2\2\u00ac\u00af\5\64\33\2\u00ad\u00af\5"+ - "X-\2\u00ae\u00ac\3\2\2\2\u00ae\u00ad\3\2\2\2\u00ae\u00af\3\2\2\2\u00af"+ - "\u00b9\3\2\2\2\u00b0\u00b1\7D\2\2\u00b1\u00b6\5`\61\2\u00b2\u00b3\7\5"+ - "\2\2\u00b3\u00b5\5`\61\2\u00b4\u00b2\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6"+ - "\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8\u00b6\3\2"+ - "\2\2\u00b9\u00b0\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00cf\3\2\2\2\u00bb"+ - "\u00bc\7?\2\2\u00bc\u00bf\7\23\2\2\u00bd\u00be\7\21\2\2\u00be\u00c0\5"+ - "`\61\2\u00bf\u00bd\3\2\2\2\u00bf\u00c0\3\2\2\2\u00c0\u00c4\3\2\2\2\u00c1"+ - "\u00c2\7@\2\2\u00c2\u00c5\5\64\33\2\u00c3\u00c5\5X-\2\u00c4\u00c1\3\2"+ - "\2\2\u00c4\u00c3\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5\u00c7\3\2\2\2\u00c6"+ - "\u00c8\5\64\33\2\u00c7\u00c6\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00cf\3"+ - "\2\2\2\u00c9\u00ca\7?\2\2\u00ca\u00cf\7E\2\2\u00cb\u00cc\7?\2\2\u00cc"+ - "\u00cd\7@\2\2\u00cd\u00cf\7E\2\2\u00cej\3\2\2\2\u00cek\3\2\2\2\u00ce|"+ - "\3\2\2\2\u00ce\u008b\3\2\2\2\u00ce\u0091\3\2\2\2\u00ce\u0098\3\2\2\2\u00ce"+ - "\u009d\3\2\2\2\u00ce\u00a2\3\2\2\2\u00ce\u00a4\3\2\2\2\u00ce\u00a6\3\2"+ - "\2\2\u00ce\u00bb\3\2\2\2\u00ce\u00c9\3\2\2\2\u00ce\u00cb\3\2\2\2\u00cf"+ - "\7\3\2\2\2\u00d0\u00d1\7I\2\2\u00d1\u00d6\5\34\17\2\u00d2\u00d3\7\5\2"+ - "\2\u00d3\u00d5\5\34\17\2\u00d4\u00d2\3\2\2\2\u00d5\u00d8\3\2\2\2\u00d6"+ - "\u00d4\3\2\2\2\u00d6\u00d7\3\2\2\2\u00d7\u00da\3\2\2\2\u00d8\u00d6\3\2"+ - "\2\2\u00d9\u00d0\3\2\2\2\u00d9\u00da\3\2\2\2\u00da\u00db\3\2\2\2\u00db"+ - "\u00dc\5\n\6\2\u00dc\t\3\2\2\2\u00dd\u00e8\5\16\b\2\u00de\u00df\7\64\2"+ - "\2\u00df\u00e0\7\17\2\2\u00e0\u00e5\5\20\t\2\u00e1\u00e2\7\5\2\2\u00e2"+ - "\u00e4\5\20\t\2\u00e3\u00e1\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3"+ - "\2\2\2\u00e5\u00e6\3\2\2\2\u00e6\u00e9\3\2\2\2\u00e7\u00e5\3\2\2\2\u00e8"+ - "\u00de\3\2\2\2\u00e8\u00e9\3\2\2\2\u00e9\u00eb\3\2\2\2\u00ea\u00ec\5\f"+ - "\7\2\u00eb\u00ea\3\2\2\2\u00eb\u00ec\3\2\2\2\u00ec\13\3\2\2\2\u00ed\u00ee"+ - "\7+\2\2\u00ee\u00f3\t\7\2\2\u00ef\u00f0\7L\2\2\u00f0\u00f1\t\7\2\2\u00f1"+ - "\u00f3\7Q\2\2\u00f2\u00ed\3\2\2\2\u00f2\u00ef\3\2\2\2\u00f3\r\3\2\2\2"+ - "\u00f4\u00fa\5\22\n\2\u00f5\u00f6\7\3\2\2\u00f6\u00f7\5\n\6\2\u00f7\u00f8"+ - "\7\4\2\2\u00f8\u00fa\3\2\2\2\u00f9\u00f4\3\2\2\2\u00f9\u00f5\3\2\2\2\u00fa"+ - "\17\3\2\2\2\u00fb\u00fd\5,\27\2\u00fc\u00fe\t\b\2\2\u00fd\u00fc\3\2\2"+ - "\2\u00fd\u00fe\3\2\2\2\u00fe\21\3\2\2\2\u00ff\u0101\7=\2\2\u0100\u0102"+ - "\5\36\20\2\u0101\u0100\3\2\2\2\u0101\u0102\3\2\2\2\u0102\u0103\3\2\2\2"+ - "\u0103\u0108\5 \21\2\u0104\u0105\7\5\2\2\u0105\u0107\5 \21\2\u0106\u0104"+ - "\3\2\2\2\u0107\u010a\3\2\2\2\u0108\u0106\3\2\2\2\u0108\u0109\3\2\2\2\u0109"+ - "\u010c\3\2\2\2\u010a\u0108\3\2\2\2\u010b\u010d\5\24\13\2\u010c\u010b\3"+ - "\2\2\2\u010c\u010d\3\2\2\2\u010d\u0110\3\2\2\2\u010e\u010f\7H\2\2\u010f"+ - "\u0111\5.\30\2\u0110\u010e\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u0115\3\2"+ - "\2\2\u0112\u0113\7#\2\2\u0113\u0114\7\17\2\2\u0114\u0116\5\26\f\2\u0115"+ - "\u0112\3\2\2\2\u0115\u0116\3\2\2\2\u0116\u0119\3\2\2\2\u0117\u0118\7$"+ - "\2\2\u0118\u011a\5.\30\2\u0119\u0117\3\2\2\2\u0119\u011a\3\2\2\2\u011a"+ - 
"\23\3\2\2\2\u011b\u011c\7\37\2\2\u011c\u0121\5\"\22\2\u011d\u011e\7\5"+ - "\2\2\u011e\u0120\5\"\22\2\u011f\u011d\3\2\2\2\u0120\u0123\3\2\2\2\u0121"+ - "\u011f\3\2\2\2\u0121\u0122\3\2\2\2\u0122\25\3\2\2\2\u0123\u0121\3\2\2"+ - "\2\u0124\u0126\5\36\20\2\u0125\u0124\3\2\2\2\u0125\u0126\3\2\2\2\u0126"+ - "\u0127\3\2\2\2\u0127\u012c\5\30\r\2\u0128\u0129\7\5\2\2\u0129\u012b\5"+ - "\30\r\2\u012a\u0128\3\2\2\2\u012b\u012e\3\2\2\2\u012c\u012a\3\2\2\2\u012c"+ - "\u012d\3\2\2\2\u012d\27\3\2\2\2\u012e\u012c\3\2\2\2\u012f\u0130\5\32\16"+ - "\2\u0130\31\3\2\2\2\u0131\u013a\7\3\2\2\u0132\u0137\5,\27\2\u0133\u0134"+ - "\7\5\2\2\u0134\u0136\5,\27\2\u0135\u0133\3\2\2\2\u0136\u0139\3\2\2\2\u0137"+ - "\u0135\3\2\2\2\u0137\u0138\3\2\2\2\u0138\u013b\3\2\2\2\u0139\u0137\3\2"+ - "\2\2\u013a\u0132\3\2\2\2\u013a\u013b\3\2\2\2\u013b\u013c\3\2\2\2\u013c"+ - "\u013f\7\4\2\2\u013d\u013f\5,\27\2\u013e\u0131\3\2\2\2\u013e\u013d\3\2"+ - "\2\2\u013f\33\3\2\2\2\u0140\u0141\5V,\2\u0141\u0142\7\f\2\2\u0142\u0143"+ - "\7\3\2\2\u0143\u0144\5\n\6\2\u0144\u0145\7\4\2\2\u0145\35\3\2\2\2\u0146"+ - "\u0147\t\t\2\2\u0147\37\3\2\2\2\u0148\u014d\5,\27\2\u0149\u014b\7\f\2"+ - "\2\u014a\u0149\3\2\2\2\u014a\u014b\3\2\2\2\u014b\u014c\3\2\2\2\u014c\u014e"+ - "\5V,\2\u014d\u014a\3\2\2\2\u014d\u014e\3\2\2\2\u014e!\3\2\2\2\u014f\u0153"+ - "\5*\26\2\u0150\u0152\5$\23\2\u0151\u0150\3\2\2\2\u0152\u0155\3\2\2\2\u0153"+ - "\u0151\3\2\2\2\u0153\u0154\3\2\2\2\u0154#\3\2\2\2\u0155\u0153\3\2\2\2"+ - "\u0156\u0157\5&\24\2\u0157\u0158\7(\2\2\u0158\u015a\5*\26\2\u0159\u015b"+ - "\5(\25\2\u015a\u0159\3\2\2\2\u015a\u015b\3\2\2\2\u015b\u0162\3\2\2\2\u015c"+ - "\u015d\7.\2\2\u015d\u015e\5&\24\2\u015e\u015f\7(\2\2\u015f\u0160\5*\26"+ - "\2\u0160\u0162\3\2\2\2\u0161\u0156\3\2\2\2\u0161\u015c\3\2\2\2\u0162%"+ - "\3\2\2\2\u0163\u0165\7&\2\2\u0164\u0163\3\2\2\2\u0164\u0165\3\2\2\2\u0165"+ - "\u0173\3\2\2\2\u0166\u0168\7)\2\2\u0167\u0169\7\65\2\2\u0168\u0167\3\2"+ - "\2\2\u0168\u0169\3\2\2\2\u0169\u0173\3\2\2\2\u016a\u016c\79\2\2\u016b"+ - "\u016d\7\65\2\2\u016c\u016b\3\2\2\2\u016c\u016d\3\2\2\2\u016d\u0173\3"+ - "\2\2\2\u016e\u0170\7 \2\2\u016f\u0171\7\65\2\2\u0170\u016f\3\2\2\2\u0170"+ - "\u0171\3\2\2\2\u0171\u0173\3\2\2\2\u0172\u0164\3\2\2\2\u0172\u0166\3\2"+ - "\2\2\u0172\u016a\3\2\2\2\u0172\u016e\3\2\2\2\u0173\'\3\2\2\2\u0174\u0175"+ - "\7\61\2\2\u0175\u0183\5.\30\2\u0176\u0177\7F\2\2\u0177\u0178\7\3\2\2\u0178"+ - "\u017d\5V,\2\u0179\u017a\7\5\2\2\u017a\u017c\5V,\2\u017b\u0179\3\2\2\2"+ - "\u017c\u017f\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0180"+ - "\3\2\2\2\u017f\u017d\3\2\2\2\u0180\u0181\7\4\2\2\u0181\u0183\3\2\2\2\u0182"+ - "\u0174\3\2\2\2\u0182\u0176\3\2\2\2\u0183)\3\2\2\2\u0184\u0189\5X-\2\u0185"+ - "\u0187\7\f\2\2\u0186\u0185\3\2\2\2\u0186\u0187\3\2\2\2\u0187\u0188\3\2"+ - "\2\2\u0188\u018a\5T+\2\u0189\u0186\3\2\2\2\u0189\u018a\3\2\2\2\u018a\u019e"+ - "\3\2\2\2\u018b\u018c\7\3\2\2\u018c\u018d\5\n\6\2\u018d\u0192\7\4\2\2\u018e"+ - "\u0190\7\f\2\2\u018f\u018e\3\2\2\2\u018f\u0190\3\2\2\2\u0190\u0191\3\2"+ - "\2\2\u0191\u0193\5T+\2\u0192\u018f\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u019e"+ - "\3\2\2\2\u0194\u0195\7\3\2\2\u0195\u0196\5\"\22\2\u0196\u019b\7\4\2\2"+ - "\u0197\u0199\7\f\2\2\u0198\u0197\3\2\2\2\u0198\u0199\3\2\2\2\u0199\u019a"+ - "\3\2\2\2\u019a\u019c\5T+\2\u019b\u0198\3\2\2\2\u019b\u019c\3\2\2\2\u019c"+ - "\u019e\3\2\2\2\u019d\u0184\3\2\2\2\u019d\u018b\3\2\2\2\u019d\u0194\3\2"+ - "\2\2\u019e+\3\2\2\2\u019f\u01a0\5.\30\2\u01a0-\3\2\2\2\u01a1\u01a2\b\30"+ - "\1\2\u01a2\u01a3\7/\2\2\u01a3\u01d3\5.\30\n\u01a4\u01a5\7\32\2\2\u01a5"+ - 
"\u01a6\7\3\2\2\u01a6\u01a7\5\b\5\2\u01a7\u01a8\7\4\2\2\u01a8\u01d3\3\2"+ - "\2\2\u01a9\u01aa\7;\2\2\u01aa\u01ab\7\3\2\2\u01ab\u01b0\5`\61\2\u01ac"+ - "\u01ad\7\5\2\2\u01ad\u01af\5`\61\2\u01ae\u01ac\3\2\2\2\u01af\u01b2\3\2"+ - "\2\2\u01b0\u01ae\3\2\2\2\u01b0\u01b1\3\2\2\2\u01b1\u01b3\3\2\2\2\u01b2"+ - "\u01b0\3\2\2\2\u01b3\u01b4\7\4\2\2\u01b4\u01d3\3\2\2\2\u01b5\u01b6\7-"+ - "\2\2\u01b6\u01b7\7\3\2\2\u01b7\u01b8\5T+\2\u01b8\u01b9\7\5\2\2\u01b9\u01be"+ - "\5`\61\2\u01ba\u01bb\7\5\2\2\u01bb\u01bd\5`\61\2\u01bc\u01ba\3\2\2\2\u01bd"+ - "\u01c0\3\2\2\2\u01be\u01bc\3\2\2\2\u01be\u01bf\3\2\2\2\u01bf\u01c1\3\2"+ - "\2\2\u01c0\u01be\3\2\2\2\u01c1\u01c2\7\4\2\2\u01c2\u01d3\3\2\2\2\u01c3"+ - "\u01c4\7-\2\2\u01c4\u01c5\7\3\2\2\u01c5\u01c6\5`\61\2\u01c6\u01c7\7\5"+ - "\2\2\u01c7\u01cc\5`\61\2\u01c8\u01c9\7\5\2\2\u01c9\u01cb\5`\61\2\u01ca"+ - "\u01c8\3\2\2\2\u01cb\u01ce\3\2\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cd\3\2"+ - "\2\2\u01cd\u01cf\3\2\2\2\u01ce\u01cc\3\2\2\2\u01cf\u01d0\7\4\2\2\u01d0"+ - "\u01d3\3\2\2\2\u01d1\u01d3\5\60\31\2\u01d2\u01a1\3\2\2\2\u01d2\u01a4\3"+ - "\2\2\2\u01d2\u01a9\3\2\2\2\u01d2\u01b5\3\2\2\2\u01d2\u01c3\3\2\2\2\u01d2"+ - "\u01d1\3\2\2\2\u01d3\u01dc\3\2\2\2\u01d4\u01d5\f\4\2\2\u01d5\u01d6\7\n"+ - "\2\2\u01d6\u01db\5.\30\5\u01d7\u01d8\f\3\2\2\u01d8\u01d9\7\63\2\2\u01d9"+ - "\u01db\5.\30\4\u01da\u01d4\3\2\2\2\u01da\u01d7\3\2\2\2\u01db\u01de\3\2"+ - "\2\2\u01dc\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd/\3\2\2\2\u01de\u01dc"+ - "\3\2\2\2\u01df\u01e1\5:\36\2\u01e0\u01e2\5\62\32\2\u01e1\u01e0\3\2\2\2"+ - "\u01e1\u01e2\3\2\2\2\u01e2\61\3\2\2\2\u01e3\u01e5\7/\2\2\u01e4\u01e3\3"+ - "\2\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6\u01e7\7\16\2\2\u01e7"+ - "\u01e8\5:\36\2\u01e8\u01e9\7\n\2\2\u01e9\u01ea\5:\36\2\u01ea\u0212\3\2"+ - "\2\2\u01eb\u01ed\7/\2\2\u01ec\u01eb\3\2\2\2\u01ec\u01ed\3\2\2\2\u01ed"+ - "\u01ee\3\2\2\2\u01ee\u01ef\7%\2\2\u01ef\u01f0\7\3\2\2\u01f0\u01f5\5,\27"+ - "\2\u01f1\u01f2\7\5\2\2\u01f2\u01f4\5,\27\2\u01f3\u01f1\3\2\2\2\u01f4\u01f7"+ - "\3\2\2\2\u01f5\u01f3\3\2\2\2\u01f5\u01f6\3\2\2\2\u01f6\u01f8\3\2\2\2\u01f7"+ - "\u01f5\3\2\2\2\u01f8\u01f9\7\4\2\2\u01f9\u0212\3\2\2\2\u01fa\u01fc\7/"+ - "\2\2\u01fb\u01fa\3\2\2\2\u01fb\u01fc\3\2\2\2\u01fc\u01fd\3\2\2\2\u01fd"+ - "\u01fe\7%\2\2\u01fe\u01ff\7\3\2\2\u01ff\u0200\5\b\5\2\u0200\u0201\7\4"+ - "\2\2\u0201\u0212\3\2\2\2\u0202\u0204\7/\2\2\u0203\u0202\3\2\2\2\u0203"+ - "\u0204\3\2\2\2\u0204\u0205\3\2\2\2\u0205\u0206\7*\2\2\u0206\u0212\5\66"+ - "\34\2\u0207\u0209\7/\2\2\u0208\u0207\3\2\2\2\u0208\u0209\3\2\2\2\u0209"+ - "\u020a\3\2\2\2\u020a\u020b\7:\2\2\u020b\u0212\5`\61\2\u020c\u020e\7\'"+ - "\2\2\u020d\u020f\7/\2\2\u020e\u020d\3\2\2\2\u020e\u020f\3\2\2\2\u020f"+ - "\u0210\3\2\2\2\u0210\u0212\7\60\2\2\u0211\u01e4\3\2\2\2\u0211\u01ec\3"+ - "\2\2\2\u0211\u01fb\3\2\2\2\u0211\u0203\3\2\2\2\u0211\u0208\3\2\2\2\u0211"+ - "\u020c\3\2\2\2\u0212\63\3\2\2\2\u0213\u0214\7*\2\2\u0214\u0215\5\66\34"+ - "\2\u0215\65\3\2\2\2\u0216\u0218\5`\61\2\u0217\u0219\58\35\2\u0218\u0217"+ - "\3\2\2\2\u0218\u0219\3\2\2\2\u0219\67\3\2\2\2\u021a\u021b\7\30\2\2\u021b"+ - "\u0221\5`\61\2\u021c\u021d\7J\2\2\u021d\u021e\5`\61\2\u021e\u021f\7Q\2"+ - "\2\u021f\u0221\3\2\2\2\u0220\u021a\3\2\2\2\u0220\u021c\3\2\2\2\u02219"+ - "\3\2\2\2\u0222\u0223\b\36\1\2\u0223\u0227\5<\37\2\u0224\u0225\t\n\2\2"+ - "\u0225\u0227\5:\36\6\u0226\u0222\3\2\2\2\u0226\u0224\3\2\2\2\u0227\u0234"+ - "\3\2\2\2\u0228\u0229\f\5\2\2\u0229\u022a\t\13\2\2\u022a\u0233\5:\36\6"+ - "\u022b\u022c\f\4\2\2\u022c\u022d\t\n\2\2\u022d\u0233\5:\36\5\u022e\u022f"+ - 
"\f\3\2\2\u022f\u0230\5N(\2\u0230\u0231\5:\36\4\u0231\u0233\3\2\2\2\u0232"+ - "\u0228\3\2\2\2\u0232\u022b\3\2\2\2\u0232\u022e\3\2\2\2\u0233\u0236\3\2"+ - "\2\2\u0234\u0232\3\2\2\2\u0234\u0235\3\2\2\2\u0235;\3\2\2\2\u0236\u0234"+ - "\3\2\2\2\u0237\u024d\5> \2\u0238\u024d\5B\"\2\u0239\u024d\5L\'\2\u023a"+ - "\u024d\7Z\2\2\u023b\u023c\5T+\2\u023c\u023d\7^\2\2\u023d\u023f\3\2\2\2"+ - "\u023e\u023b\3\2\2\2\u023e\u023f\3\2\2\2\u023f\u0240\3\2\2\2\u0240\u024d"+ - "\7Z\2\2\u0241\u024d\5F$\2\u0242\u0243\7\3\2\2\u0243\u0244\5\b\5\2\u0244"+ - "\u0245\7\4\2\2\u0245\u024d\3\2\2\2\u0246\u024d\5V,\2\u0247\u024d\5T+\2"+ - "\u0248\u0249\7\3\2\2\u0249\u024a\5,\27\2\u024a\u024b\7\4\2\2\u024b\u024d"+ - "\3\2\2\2\u024c\u0237\3\2\2\2\u024c\u0238\3\2\2\2\u024c\u0239\3\2\2\2\u024c"+ - "\u023a\3\2\2\2\u024c\u023e\3\2\2\2\u024c\u0241\3\2\2\2\u024c\u0242\3\2"+ - "\2\2\u024c\u0246\3\2\2\2\u024c\u0247\3\2\2\2\u024c\u0248\3\2\2\2\u024d"+ - "=\3\2\2\2\u024e\u0254\5@!\2\u024f\u0250\7K\2\2\u0250\u0251\5@!\2\u0251"+ - "\u0252\7Q\2\2\u0252\u0254\3\2\2\2\u0253\u024e\3\2\2\2\u0253\u024f\3\2"+ - "\2\2\u0254?\3\2\2\2\u0255\u0256\7\20\2\2\u0256\u0257\7\3\2\2\u0257\u0258"+ - "\5,\27\2\u0258\u0259\7\f\2\2\u0259\u025a\5R*\2\u025a\u025b\7\4\2\2\u025b"+ - "A\3\2\2\2\u025c\u0262\5D#\2\u025d\u025e\7K\2\2\u025e\u025f\5D#\2\u025f"+ - "\u0260\7Q\2\2\u0260\u0262\3\2\2\2\u0261\u025c\3\2\2\2\u0261\u025d\3\2"+ - "\2\2\u0262C\3\2\2\2\u0263\u0264\7\34\2\2\u0264\u0265\7\3\2\2\u0265\u0266"+ - "\5V,\2\u0266\u0267\7\37\2\2\u0267\u0268\5:\36\2\u0268\u0269\7\4\2\2\u0269"+ - "E\3\2\2\2\u026a\u0270\5H%\2\u026b\u026c\7K\2\2\u026c\u026d\5H%\2\u026d"+ - "\u026e\7Q\2\2\u026e\u0270\3\2\2\2\u026f\u026a\3\2\2\2\u026f\u026b\3\2"+ - "\2\2\u0270G\3\2\2\2\u0271\u0272\5J&\2\u0272\u027e\7\3\2\2\u0273\u0275"+ - "\5\36\20\2\u0274\u0273\3\2\2\2\u0274\u0275\3\2\2\2\u0275\u0276\3\2\2\2"+ - "\u0276\u027b\5,\27\2\u0277\u0278\7\5\2\2\u0278\u027a\5,\27\2\u0279\u0277"+ - "\3\2\2\2\u027a\u027d\3\2\2\2\u027b\u0279\3\2\2\2\u027b\u027c\3\2\2\2\u027c"+ - "\u027f\3\2\2\2\u027d\u027b\3\2\2\2\u027e\u0274\3\2\2\2\u027e\u027f\3\2"+ - "\2\2\u027f\u0280\3\2\2\2\u0280\u0281\7\4\2\2\u0281I\3\2\2\2\u0282\u0286"+ - "\7)\2\2\u0283\u0286\79\2\2\u0284\u0286\5V,\2\u0285\u0282\3\2\2\2\u0285"+ - "\u0283\3\2\2\2\u0285\u0284\3\2\2\2\u0286K\3\2\2\2\u0287\u02a1\7\60\2\2"+ - "\u0288\u02a1\5^\60\2\u0289\u02a1\5P)\2\u028a\u028c\7`\2\2\u028b\u028a"+ - "\3\2\2\2\u028c\u028d\3\2\2\2\u028d\u028b\3\2\2\2\u028d\u028e\3\2\2\2\u028e"+ - "\u02a1\3\2\2\2\u028f\u02a1\7_\2\2\u0290\u0291\7M\2\2\u0291\u0292\5`\61"+ - "\2\u0292\u0293\7Q\2\2\u0293\u02a1\3\2\2\2\u0294\u0295\7N\2\2\u0295\u0296"+ - "\5`\61\2\u0296\u0297\7Q\2\2\u0297\u02a1\3\2\2\2\u0298\u0299\7O\2\2\u0299"+ - "\u029a\5`\61\2\u029a\u029b\7Q\2\2\u029b\u02a1\3\2\2\2\u029c\u029d\7P\2"+ - "\2\u029d\u029e\5`\61\2\u029e\u029f\7Q\2\2\u029f\u02a1\3\2\2\2\u02a0\u0287"+ - "\3\2\2\2\u02a0\u0288\3\2\2\2\u02a0\u0289\3\2\2\2\u02a0\u028b\3\2\2\2\u02a0"+ - "\u028f\3\2\2\2\u02a0\u0290\3\2\2\2\u02a0\u0294\3\2\2\2\u02a0\u0298\3\2"+ - "\2\2\u02a0\u029c\3\2\2\2\u02a1M\3\2\2\2\u02a2\u02a3\t\f\2\2\u02a3O\3\2"+ - "\2\2\u02a4\u02a5\t\r\2\2\u02a5Q\3\2\2\2\u02a6\u02a7\5V,\2\u02a7S\3\2\2"+ - "\2\u02a8\u02a9\5V,\2\u02a9\u02aa\7^\2\2\u02aa\u02ac\3\2\2\2\u02ab\u02a8"+ - "\3\2\2\2\u02ac\u02af\3\2\2\2\u02ad\u02ab\3\2\2\2\u02ad\u02ae\3\2\2\2\u02ae"+ - "\u02b0\3\2\2\2\u02af\u02ad\3\2\2\2\u02b0\u02b1\5V,\2\u02b1U\3\2\2\2\u02b2"+ - "\u02b5\5Z.\2\u02b3\u02b5\5\\/\2\u02b4\u02b2\3\2\2\2\u02b4\u02b3\3\2\2"+ - "\2\u02b5W\3\2\2\2\u02b6\u02b7\5V,\2\u02b7\u02b8\7\6\2\2\u02b8\u02ba\3"+ - 
"\2\2\2\u02b9\u02b6\3\2\2\2\u02b9\u02ba\3\2\2\2\u02ba\u02bb\3\2\2\2\u02bb"+ - "\u02c3\7e\2\2\u02bc\u02bd\5V,\2\u02bd\u02be\7\6\2\2\u02be\u02c0\3\2\2"+ - "\2\u02bf\u02bc\3\2\2\2\u02bf\u02c0\3\2\2\2\u02c0\u02c1\3\2\2\2\u02c1\u02c3"+ - "\5V,\2\u02c2\u02b9\3\2\2\2\u02c2\u02bf\3\2\2\2\u02c3Y\3\2\2\2\u02c4\u02c7"+ - "\7f\2\2\u02c5\u02c7\7g\2\2\u02c6\u02c4\3\2\2\2\u02c6\u02c5\3\2\2\2\u02c7"+ - "[\3\2\2\2\u02c8\u02cc\7c\2\2\u02c9\u02cc\5b\62\2\u02ca\u02cc\7d\2\2\u02cb"+ - "\u02c8\3\2\2\2\u02cb\u02c9\3\2\2\2\u02cb\u02ca\3\2\2\2\u02cc]\3\2\2\2"+ - "\u02cd\u02cf\t\n\2\2\u02ce\u02cd\3\2\2\2\u02ce\u02cf\3\2\2\2\u02cf\u02d0"+ - "\3\2\2\2\u02d0\u02d6\7b\2\2\u02d1\u02d3\t\n\2\2\u02d2\u02d1\3\2\2\2\u02d2"+ - "\u02d3\3\2\2\2\u02d3\u02d4\3\2\2\2\u02d4\u02d6\7a\2\2\u02d5\u02ce\3\2"+ - "\2\2\u02d5\u02d2\3\2\2\2\u02d6_\3\2\2\2\u02d7\u02d8\t\16\2\2\u02d8a\3"+ - "\2\2\2\u02d9\u02da\t\17\2\2\u02dac\3\2\2\2dsuy\u0082\u0084\u0088\u008f"+ - "\u0096\u009b\u00a0\u00aa\u00ae\u00b6\u00b9\u00bf\u00c4\u00c7\u00ce\u00d6"+ - "\u00d9\u00e5\u00e8\u00eb\u00f2\u00f9\u00fd\u0101\u0108\u010c\u0110\u0115"+ - "\u0119\u0121\u0125\u012c\u0137\u013a\u013e\u014a\u014d\u0153\u015a\u0161"+ - "\u0164\u0168\u016c\u0170\u0172\u017d\u0182\u0186\u0189\u018f\u0192\u0198"+ - "\u019b\u019d\u01b0\u01be\u01cc\u01d2\u01da\u01dc\u01e1\u01e4\u01ec\u01f5"+ - "\u01fb\u0203\u0208\u020e\u0211\u0218\u0220\u0226\u0232\u0234\u023e\u024c"+ - "\u0253\u0261\u026f\u0274\u027b\u027e\u0285\u028d\u02a0\u02ad\u02b4\u02b9"+ - "\u02bf\u02c2\u02c6\u02cb\u02ce\u02d2\u02d5"; + ",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\3\2\3\2"+ + "\3\2\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4v\n\4\f\4\16\4"+ + "y\13\4\3\4\5\4|\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0085\n\4\f\4\16\4"+ + "\u0088\13\4\3\4\5\4\u008b\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u0092\n\4\3\4\3"+ + "\4\3\4\3\4\3\4\5\4\u0099\n\4\3\4\3\4\3\4\5\4\u009e\n\4\3\4\3\4\3\4\5\4"+ + "\u00a3\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00ad\n\4\3\4\3\4\5\4\u00b1"+ + "\n\4\3\4\3\4\3\4\3\4\7\4\u00b7\n\4\f\4\16\4\u00ba\13\4\5\4\u00bc\n\4\3"+ + "\4\3\4\3\4\3\4\5\4\u00c2\n\4\3\4\3\4\3\4\5\4\u00c7\n\4\3\4\5\4\u00ca\n"+ + "\4\3\4\3\4\3\4\3\4\3\4\5\4\u00d1\n\4\3\5\3\5\3\5\3\5\7\5\u00d7\n\5\f\5"+ + "\16\5\u00da\13\5\5\5\u00dc\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\7\6\u00e6"+ + "\n\6\f\6\16\6\u00e9\13\6\5\6\u00eb\n\6\3\6\5\6\u00ee\n\6\3\7\3\7\3\7\3"+ + "\7\3\7\5\7\u00f5\n\7\3\b\3\b\3\b\3\b\3\b\5\b\u00fc\n\b\3\t\3\t\5\t\u0100"+ + "\n\t\3\n\3\n\5\n\u0104\n\n\3\n\3\n\3\n\7\n\u0109\n\n\f\n\16\n\u010c\13"+ + "\n\3\n\5\n\u010f\n\n\3\n\3\n\5\n\u0113\n\n\3\n\3\n\3\n\5\n\u0118\n\n\3"+ + "\n\3\n\5\n\u011c\n\n\3\13\3\13\3\13\3\13\7\13\u0122\n\13\f\13\16\13\u0125"+ + "\13\13\3\f\5\f\u0128\n\f\3\f\3\f\3\f\7\f\u012d\n\f\f\f\16\f\u0130\13\f"+ + "\3\r\3\r\3\16\3\16\3\16\3\16\7\16\u0138\n\16\f\16\16\16\u013b\13\16\5"+ + "\16\u013d\n\16\3\16\3\16\5\16\u0141\n\16\3\17\3\17\3\17\3\17\3\17\3\17"+ + "\3\20\3\20\3\21\3\21\5\21\u014d\n\21\3\21\5\21\u0150\n\21\3\22\3\22\7"+ + "\22\u0154\n\22\f\22\16\22\u0157\13\22\3\23\3\23\3\23\3\23\5\23\u015d\n"+ + "\23\3\23\3\23\3\23\3\23\3\23\5\23\u0164\n\23\3\24\5\24\u0167\n\24\3\24"+ + "\3\24\5\24\u016b\n\24\3\24\3\24\5\24\u016f\n\24\3\24\3\24\5\24\u0173\n"+ + "\24\5\24\u0175\n\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\7\25\u017e\n\25"+ + "\f\25\16\25\u0181\13\25\3\25\3\25\5\25\u0185\n\25\3\26\3\26\5\26\u0189"+ + "\n\26\3\26\5\26\u018c\n\26\3\26\3\26\3\26\3\26\5\26\u0192\n\26\3\26\5"+ + "\26\u0195\n\26\3\26\3\26\3\26\3\26\5\26\u019b\n\26\3\26\5\26\u019e\n\26"+ + 
"\5\26\u01a0\n\26\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ + "\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ + "\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\5\30\u01c3\n\30\3\30\3\30\3\30"+ + "\3\30\3\30\3\30\7\30\u01cb\n\30\f\30\16\30\u01ce\13\30\3\31\3\31\7\31"+ + "\u01d2\n\31\f\31\16\31\u01d5\13\31\3\32\3\32\5\32\u01d9\n\32\3\33\5\33"+ + "\u01dc\n\33\3\33\3\33\3\33\3\33\3\33\3\33\5\33\u01e4\n\33\3\33\3\33\3"+ + "\33\3\33\3\33\7\33\u01eb\n\33\f\33\16\33\u01ee\13\33\3\33\3\33\3\33\5"+ + "\33\u01f3\n\33\3\33\3\33\3\33\3\33\3\33\3\33\5\33\u01fb\n\33\3\33\3\33"+ + "\3\33\5\33\u0200\n\33\3\33\3\33\3\33\3\33\5\33\u0206\n\33\3\33\5\33\u0209"+ + "\n\33\3\34\3\34\3\34\3\35\3\35\5\35\u0210\n\35\3\36\3\36\3\36\3\36\3\36"+ + "\3\36\5\36\u0218\n\36\3\37\3\37\3\37\3\37\5\37\u021e\n\37\3\37\3\37\3"+ + "\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\7\37\u022a\n\37\f\37\16\37\u022d"+ + "\13\37\3 \3 \3 \3 \3 \3 \3 \5 \u0236\n \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 "+ + "\3 \3 \5 \u0244\n \3!\3!\3!\3!\3!\5!\u024b\n!\3\"\3\"\3\"\3\"\3\"\3\""+ + "\3\"\3#\3#\3#\3#\3#\5#\u0259\n#\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\5"+ + "%\u0267\n%\3&\3&\3&\5&\u026c\n&\3&\3&\3&\7&\u0271\n&\f&\16&\u0274\13&"+ + "\5&\u0276\n&\3&\3&\3\'\3\'\3\'\5\'\u027d\n\'\3(\3(\3(\3(\6(\u0283\n(\r"+ + "(\16(\u0284\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\5(\u0298"+ + "\n(\3)\3)\3*\3*\3+\3+\3,\3,\3,\7,\u02a3\n,\f,\16,\u02a6\13,\3,\3,\3-\3"+ + "-\5-\u02ac\n-\3.\3.\3.\5.\u02b1\n.\3.\3.\3.\3.\5.\u02b7\n.\3.\5.\u02ba"+ + "\n.\3/\3/\5/\u02be\n/\3\60\3\60\3\60\5\60\u02c3\n\60\3\61\5\61\u02c6\n"+ + "\61\3\61\3\61\5\61\u02ca\n\61\3\61\5\61\u02cd\n\61\3\62\3\62\3\63\3\63"+ + "\3\63\2\4.<\64\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64"+ + "\668:<>@BDFHJLNPRTVXZ\\^`bd\2\20\b\2\7\7\t\t\31\31,,\62\62\66\66\4\2\""+ + "\"BB\4\2\t\t\62\62\4\2\37\37%%\3\2\25\26\4\2\7\7aa\4\2\r\r\25\25\4\2\7"+ + "\7\27\27\3\2XY\3\2Z\\\3\2RW\4\2\35\35CC\3\2_`\20\2\b\t\22\24\31\31\33"+ + "\33\36\36!\",,\62\62\668:<>?ABDEGG\u032c\2f\3\2\2\2\4i\3\2\2\2\6\u00d0"+ + "\3\2\2\2\b\u00db\3\2\2\2\n\u00df\3\2\2\2\f\u00f4\3\2\2\2\16\u00fb\3\2"+ + "\2\2\20\u00fd\3\2\2\2\22\u0101\3\2\2\2\24\u011d\3\2\2\2\26\u0127\3\2\2"+ + "\2\30\u0131\3\2\2\2\32\u0140\3\2\2\2\34\u0142\3\2\2\2\36\u0148\3\2\2\2"+ + " \u014a\3\2\2\2\"\u0151\3\2\2\2$\u0163\3\2\2\2&\u0174\3\2\2\2(\u0184\3"+ + "\2\2\2*\u019f\3\2\2\2,\u01a1\3\2\2\2.\u01c2\3\2\2\2\60\u01d3\3\2\2\2\62"+ + "\u01d6\3\2\2\2\64\u0208\3\2\2\2\66\u020a\3\2\2\28\u020d\3\2\2\2:\u0217"+ + "\3\2\2\2<\u021d\3\2\2\2>\u0243\3\2\2\2@\u024a\3\2\2\2B\u024c\3\2\2\2D"+ + "\u0258\3\2\2\2F\u025a\3\2\2\2H\u0266\3\2\2\2J\u0268\3\2\2\2L\u027c\3\2"+ + "\2\2N\u0297\3\2\2\2P\u0299\3\2\2\2R\u029b\3\2\2\2T\u029d\3\2\2\2V\u02a4"+ + "\3\2\2\2X\u02ab\3\2\2\2Z\u02b9\3\2\2\2\\\u02bd\3\2\2\2^\u02c2\3\2\2\2"+ + "`\u02cc\3\2\2\2b\u02ce\3\2\2\2d\u02d0\3\2\2\2fg\5\6\4\2gh\7\2\2\3h\3\3"+ + "\2\2\2ij\5,\27\2jk\7\2\2\3k\5\3\2\2\2l\u00d1\5\b\5\2m{\7\33\2\2nw\7\3"+ + "\2\2op\78\2\2pv\t\2\2\2qr\7\36\2\2rv\t\3\2\2st\7G\2\2tv\5R*\2uo\3\2\2"+ + "\2uq\3\2\2\2us\3\2\2\2vy\3\2\2\2wu\3\2\2\2wx\3\2\2\2xz\3\2\2\2yw\3\2\2"+ + "\2z|\7\4\2\2{n\3\2\2\2{|\3\2\2\2|}\3\2\2\2}\u00d1\5\6\4\2~\u008a\7\24"+ + "\2\2\177\u0086\7\3\2\2\u0080\u0081\78\2\2\u0081\u0085\t\4\2\2\u0082\u0083"+ + "\7\36\2\2\u0083\u0085\t\3\2\2\u0084\u0080\3\2\2\2\u0084\u0082\3\2\2\2"+ + "\u0085\u0088\3\2\2\2\u0086\u0084\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0089"+ + "\3\2\2\2\u0088\u0086\3\2\2\2\u0089\u008b\7\4\2\2\u008a\177\3\2\2\2\u008a"+ + 
"\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u00d1\5\6\4\2\u008d\u008e\7>"+ + "\2\2\u008e\u0091\7A\2\2\u008f\u0092\5\66\34\2\u0090\u0092\5Z.\2\u0091"+ + "\u008f\3\2\2\2\u0091\u0090\3\2\2\2\u0091\u0092\3\2\2\2\u0092\u00d1\3\2"+ + "\2\2\u0093\u0094\7>\2\2\u0094\u0095\7\23\2\2\u0095\u0098\t\5\2\2\u0096"+ + "\u0099\5\66\34\2\u0097\u0099\5Z.\2\u0098\u0096\3\2\2\2\u0098\u0097\3\2"+ + "\2\2\u0099\u00d1\3\2\2\2\u009a\u009d\t\6\2\2\u009b\u009e\5\66\34\2\u009c"+ + "\u009e\5Z.\2\u009d\u009b\3\2\2\2\u009d\u009c\3\2\2\2\u009e\u00d1\3\2\2"+ + "\2\u009f\u00a0\7>\2\2\u00a0\u00a2\7!\2\2\u00a1\u00a3\5\66\34\2\u00a2\u00a1"+ + "\3\2\2\2\u00a2\u00a3\3\2\2\2\u00a3\u00d1\3\2\2\2\u00a4\u00a5\7>\2\2\u00a5"+ + "\u00d1\7<\2\2\u00a6\u00a7\7?\2\2\u00a7\u00d1\7\22\2\2\u00a8\u00a9\7?\2"+ + "\2\u00a9\u00ac\7A\2\2\u00aa\u00ab\7\21\2\2\u00ab\u00ad\5\66\34\2\u00ac"+ + "\u00aa\3\2\2\2\u00ac\u00ad\3\2\2\2\u00ad\u00b0\3\2\2\2\u00ae\u00b1\5\66"+ + "\34\2\u00af\u00b1\5Z.\2\u00b0\u00ae\3\2\2\2\u00b0\u00af\3\2\2\2\u00b0"+ + "\u00b1\3\2\2\2\u00b1\u00bb\3\2\2\2\u00b2\u00b3\7D\2\2\u00b3\u00b8\5b\62"+ + "\2\u00b4\u00b5\7\5\2\2\u00b5\u00b7\5b\62\2\u00b6\u00b4\3\2\2\2\u00b7\u00ba"+ + "\3\2\2\2\u00b8\u00b6\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9\u00bc\3\2\2\2\u00ba"+ + "\u00b8\3\2\2\2\u00bb\u00b2\3\2\2\2\u00bb\u00bc\3\2\2\2\u00bc\u00d1\3\2"+ + "\2\2\u00bd\u00be\7?\2\2\u00be\u00c1\7\23\2\2\u00bf\u00c0\7\21\2\2\u00c0"+ + "\u00c2\5b\62\2\u00c1\u00bf\3\2\2\2\u00c1\u00c2\3\2\2\2\u00c2\u00c6\3\2"+ + "\2\2\u00c3\u00c4\7@\2\2\u00c4\u00c7\5\66\34\2\u00c5\u00c7\5Z.\2\u00c6"+ + "\u00c3\3\2\2\2\u00c6\u00c5\3\2\2\2\u00c6\u00c7\3\2\2\2\u00c7\u00c9\3\2"+ + "\2\2\u00c8\u00ca\5\66\34\2\u00c9\u00c8\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca"+ + "\u00d1\3\2\2\2\u00cb\u00cc\7?\2\2\u00cc\u00d1\7E\2\2\u00cd\u00ce\7?\2"+ + "\2\u00ce\u00cf\7@\2\2\u00cf\u00d1\7E\2\2\u00d0l\3\2\2\2\u00d0m\3\2\2\2"+ + "\u00d0~\3\2\2\2\u00d0\u008d\3\2\2\2\u00d0\u0093\3\2\2\2\u00d0\u009a\3"+ + "\2\2\2\u00d0\u009f\3\2\2\2\u00d0\u00a4\3\2\2\2\u00d0\u00a6\3\2\2\2\u00d0"+ + "\u00a8\3\2\2\2\u00d0\u00bd\3\2\2\2\u00d0\u00cb\3\2\2\2\u00d0\u00cd\3\2"+ + "\2\2\u00d1\7\3\2\2\2\u00d2\u00d3\7I\2\2\u00d3\u00d8\5\34\17\2\u00d4\u00d5"+ + "\7\5\2\2\u00d5\u00d7\5\34\17\2\u00d6\u00d4\3\2\2\2\u00d7\u00da\3\2\2\2"+ + "\u00d8\u00d6\3\2\2\2\u00d8\u00d9\3\2\2\2\u00d9\u00dc\3\2\2\2\u00da\u00d8"+ + "\3\2\2\2\u00db\u00d2\3\2\2\2\u00db\u00dc\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd"+ + "\u00de\5\n\6\2\u00de\t\3\2\2\2\u00df\u00ea\5\16\b\2\u00e0\u00e1\7\64\2"+ + "\2\u00e1\u00e2\7\17\2\2\u00e2\u00e7\5\20\t\2\u00e3\u00e4\7\5\2\2\u00e4"+ + "\u00e6\5\20\t\2\u00e5\u00e3\3\2\2\2\u00e6\u00e9\3\2\2\2\u00e7\u00e5\3"+ + "\2\2\2\u00e7\u00e8\3\2\2\2\u00e8\u00eb\3\2\2\2\u00e9\u00e7\3\2\2\2\u00ea"+ + "\u00e0\3\2\2\2\u00ea\u00eb\3\2\2\2\u00eb\u00ed\3\2\2\2\u00ec\u00ee\5\f"+ + "\7\2\u00ed\u00ec\3\2\2\2\u00ed\u00ee\3\2\2\2\u00ee\13\3\2\2\2\u00ef\u00f0"+ + "\7+\2\2\u00f0\u00f5\t\7\2\2\u00f1\u00f2\7L\2\2\u00f2\u00f3\t\7\2\2\u00f3"+ + "\u00f5\7Q\2\2\u00f4\u00ef\3\2\2\2\u00f4\u00f1\3\2\2\2\u00f5\r\3\2\2\2"+ + "\u00f6\u00fc\5\22\n\2\u00f7\u00f8\7\3\2\2\u00f8\u00f9\5\n\6\2\u00f9\u00fa"+ + "\7\4\2\2\u00fa\u00fc\3\2\2\2\u00fb\u00f6\3\2\2\2\u00fb\u00f7\3\2\2\2\u00fc"+ + "\17\3\2\2\2\u00fd\u00ff\5,\27\2\u00fe\u0100\t\b\2\2\u00ff\u00fe\3\2\2"+ + "\2\u00ff\u0100\3\2\2\2\u0100\21\3\2\2\2\u0101\u0103\7=\2\2\u0102\u0104"+ + "\5\36\20\2\u0103\u0102\3\2\2\2\u0103\u0104\3\2\2\2\u0104\u0105\3\2\2\2"+ + "\u0105\u010a\5 \21\2\u0106\u0107\7\5\2\2\u0107\u0109\5 \21\2\u0108\u0106"+ + "\3\2\2\2\u0109\u010c\3\2\2\2\u010a\u0108\3\2\2\2\u010a\u010b\3\2\2\2\u010b"+ + 
"\u010e\3\2\2\2\u010c\u010a\3\2\2\2\u010d\u010f\5\24\13\2\u010e\u010d\3"+ + "\2\2\2\u010e\u010f\3\2\2\2\u010f\u0112\3\2\2\2\u0110\u0111\7H\2\2\u0111"+ + "\u0113\5.\30\2\u0112\u0110\3\2\2\2\u0112\u0113\3\2\2\2\u0113\u0117\3\2"+ + "\2\2\u0114\u0115\7#\2\2\u0115\u0116\7\17\2\2\u0116\u0118\5\26\f\2\u0117"+ + "\u0114\3\2\2\2\u0117\u0118\3\2\2\2\u0118\u011b\3\2\2\2\u0119\u011a\7$"+ + "\2\2\u011a\u011c\5.\30\2\u011b\u0119\3\2\2\2\u011b\u011c\3\2\2\2\u011c"+ + "\23\3\2\2\2\u011d\u011e\7\37\2\2\u011e\u0123\5\"\22\2\u011f\u0120\7\5"+ + "\2\2\u0120\u0122\5\"\22\2\u0121\u011f\3\2\2\2\u0122\u0125\3\2\2\2\u0123"+ + "\u0121\3\2\2\2\u0123\u0124\3\2\2\2\u0124\25\3\2\2\2\u0125\u0123\3\2\2"+ + "\2\u0126\u0128\5\36\20\2\u0127\u0126\3\2\2\2\u0127\u0128\3\2\2\2\u0128"+ + "\u0129\3\2\2\2\u0129\u012e\5\30\r\2\u012a\u012b\7\5\2\2\u012b\u012d\5"+ + "\30\r\2\u012c\u012a\3\2\2\2\u012d\u0130\3\2\2\2\u012e\u012c\3\2\2\2\u012e"+ + "\u012f\3\2\2\2\u012f\27\3\2\2\2\u0130\u012e\3\2\2\2\u0131\u0132\5\32\16"+ + "\2\u0132\31\3\2\2\2\u0133\u013c\7\3\2\2\u0134\u0139\5,\27\2\u0135\u0136"+ + "\7\5\2\2\u0136\u0138\5,\27\2\u0137\u0135\3\2\2\2\u0138\u013b\3\2\2\2\u0139"+ + "\u0137\3\2\2\2\u0139\u013a\3\2\2\2\u013a\u013d\3\2\2\2\u013b\u0139\3\2"+ + "\2\2\u013c\u0134\3\2\2\2\u013c\u013d\3\2\2\2\u013d\u013e\3\2\2\2\u013e"+ + "\u0141\7\4\2\2\u013f\u0141\5,\27\2\u0140\u0133\3\2\2\2\u0140\u013f\3\2"+ + "\2\2\u0141\33\3\2\2\2\u0142\u0143\5X-\2\u0143\u0144\7\f\2\2\u0144\u0145"+ + "\7\3\2\2\u0145\u0146\5\n\6\2\u0146\u0147\7\4\2\2\u0147\35\3\2\2\2\u0148"+ + "\u0149\t\t\2\2\u0149\37\3\2\2\2\u014a\u014f\5,\27\2\u014b\u014d\7\f\2"+ + "\2\u014c\u014b\3\2\2\2\u014c\u014d\3\2\2\2\u014d\u014e\3\2\2\2\u014e\u0150"+ + "\5X-\2\u014f\u014c\3\2\2\2\u014f\u0150\3\2\2\2\u0150!\3\2\2\2\u0151\u0155"+ + "\5*\26\2\u0152\u0154\5$\23\2\u0153\u0152\3\2\2\2\u0154\u0157\3\2\2\2\u0155"+ + "\u0153\3\2\2\2\u0155\u0156\3\2\2\2\u0156#\3\2\2\2\u0157\u0155\3\2\2\2"+ + "\u0158\u0159\5&\24\2\u0159\u015a\7(\2\2\u015a\u015c\5*\26\2\u015b\u015d"+ + "\5(\25\2\u015c\u015b\3\2\2\2\u015c\u015d\3\2\2\2\u015d\u0164\3\2\2\2\u015e"+ + "\u015f\7.\2\2\u015f\u0160\5&\24\2\u0160\u0161\7(\2\2\u0161\u0162\5*\26"+ + "\2\u0162\u0164\3\2\2\2\u0163\u0158\3\2\2\2\u0163\u015e\3\2\2\2\u0164%"+ + "\3\2\2\2\u0165\u0167\7&\2\2\u0166\u0165\3\2\2\2\u0166\u0167\3\2\2\2\u0167"+ + "\u0175\3\2\2\2\u0168\u016a\7)\2\2\u0169\u016b\7\65\2\2\u016a\u0169\3\2"+ + "\2\2\u016a\u016b\3\2\2\2\u016b\u0175\3\2\2\2\u016c\u016e\79\2\2\u016d"+ + "\u016f\7\65\2\2\u016e\u016d\3\2\2\2\u016e\u016f\3\2\2\2\u016f\u0175\3"+ + "\2\2\2\u0170\u0172\7 \2\2\u0171\u0173\7\65\2\2\u0172\u0171\3\2\2\2\u0172"+ + "\u0173\3\2\2\2\u0173\u0175\3\2\2\2\u0174\u0166\3\2\2\2\u0174\u0168\3\2"+ + "\2\2\u0174\u016c\3\2\2\2\u0174\u0170\3\2\2\2\u0175\'\3\2\2\2\u0176\u0177"+ + "\7\61\2\2\u0177\u0185\5.\30\2\u0178\u0179\7F\2\2\u0179\u017a\7\3\2\2\u017a"+ + "\u017f\5X-\2\u017b\u017c\7\5\2\2\u017c\u017e\5X-\2\u017d\u017b\3\2\2\2"+ + "\u017e\u0181\3\2\2\2\u017f\u017d\3\2\2\2\u017f\u0180\3\2\2\2\u0180\u0182"+ + "\3\2\2\2\u0181\u017f\3\2\2\2\u0182\u0183\7\4\2\2\u0183\u0185\3\2\2\2\u0184"+ + "\u0176\3\2\2\2\u0184\u0178\3\2\2\2\u0185)\3\2\2\2\u0186\u018b\5Z.\2\u0187"+ + "\u0189\7\f\2\2\u0188\u0187\3\2\2\2\u0188\u0189\3\2\2\2\u0189\u018a\3\2"+ + "\2\2\u018a\u018c\5V,\2\u018b\u0188\3\2\2\2\u018b\u018c\3\2\2\2\u018c\u01a0"+ + "\3\2\2\2\u018d\u018e\7\3\2\2\u018e\u018f\5\n\6\2\u018f\u0194\7\4\2\2\u0190"+ + "\u0192\7\f\2\2\u0191\u0190\3\2\2\2\u0191\u0192\3\2\2\2\u0192\u0193\3\2"+ + "\2\2\u0193\u0195\5V,\2\u0194\u0191\3\2\2\2\u0194\u0195\3\2\2\2\u0195\u01a0"+ + 
"\3\2\2\2\u0196\u0197\7\3\2\2\u0197\u0198\5\"\22\2\u0198\u019d\7\4\2\2"+ + "\u0199\u019b\7\f\2\2\u019a\u0199\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u019c"+ + "\3\2\2\2\u019c\u019e\5V,\2\u019d\u019a\3\2\2\2\u019d\u019e\3\2\2\2\u019e"+ + "\u01a0\3\2\2\2\u019f\u0186\3\2\2\2\u019f\u018d\3\2\2\2\u019f\u0196\3\2"+ + "\2\2\u01a0+\3\2\2\2\u01a1\u01a2\5.\30\2\u01a2-\3\2\2\2\u01a3\u01a4\b\30"+ + "\1\2\u01a4\u01a5\7/\2\2\u01a5\u01c3\5.\30\n\u01a6\u01a7\7\32\2\2\u01a7"+ + "\u01a8\7\3\2\2\u01a8\u01a9\5\b\5\2\u01a9\u01aa\7\4\2\2\u01aa\u01c3\3\2"+ + "\2\2\u01ab\u01ac\7;\2\2\u01ac\u01ad\7\3\2\2\u01ad\u01ae\5b\62\2\u01ae"+ + "\u01af\5\60\31\2\u01af\u01b0\7\4\2\2\u01b0\u01c3\3\2\2\2\u01b1\u01b2\7"+ + "-\2\2\u01b2\u01b3\7\3\2\2\u01b3\u01b4\5V,\2\u01b4\u01b5\7\5\2\2\u01b5"+ + "\u01b6\5b\62\2\u01b6\u01b7\5\60\31\2\u01b7\u01b8\7\4\2\2\u01b8\u01c3\3"+ + "\2\2\2\u01b9\u01ba\7-\2\2\u01ba\u01bb\7\3\2\2\u01bb\u01bc\5b\62\2\u01bc"+ + "\u01bd\7\5\2\2\u01bd\u01be\5b\62\2\u01be\u01bf\5\60\31\2\u01bf\u01c0\7"+ + "\4\2\2\u01c0\u01c3\3\2\2\2\u01c1\u01c3\5\62\32\2\u01c2\u01a3\3\2\2\2\u01c2"+ + "\u01a6\3\2\2\2\u01c2\u01ab\3\2\2\2\u01c2\u01b1\3\2\2\2\u01c2\u01b9\3\2"+ + "\2\2\u01c2\u01c1\3\2\2\2\u01c3\u01cc\3\2\2\2\u01c4\u01c5\f\4\2\2\u01c5"+ + "\u01c6\7\n\2\2\u01c6\u01cb\5.\30\5\u01c7\u01c8\f\3\2\2\u01c8\u01c9\7\63"+ + "\2\2\u01c9\u01cb\5.\30\4\u01ca\u01c4\3\2\2\2\u01ca\u01c7\3\2\2\2\u01cb"+ + "\u01ce\3\2\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cd\3\2\2\2\u01cd/\3\2\2\2"+ + "\u01ce\u01cc\3\2\2\2\u01cf\u01d0\7\5\2\2\u01d0\u01d2\5b\62\2\u01d1\u01cf"+ + "\3\2\2\2\u01d2\u01d5\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d4"+ + "\61\3\2\2\2\u01d5\u01d3\3\2\2\2\u01d6\u01d8\5<\37\2\u01d7\u01d9\5\64\33"+ + "\2\u01d8\u01d7\3\2\2\2\u01d8\u01d9\3\2\2\2\u01d9\63\3\2\2\2\u01da\u01dc"+ + "\7/\2\2\u01db\u01da\3\2\2\2\u01db\u01dc\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd"+ + "\u01de\7\16\2\2\u01de\u01df\5<\37\2\u01df\u01e0\7\n\2\2\u01e0\u01e1\5"+ + "<\37\2\u01e1\u0209\3\2\2\2\u01e2\u01e4\7/\2\2\u01e3\u01e2\3\2\2\2\u01e3"+ + "\u01e4\3\2\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e6\7%\2\2\u01e6\u01e7\7\3"+ + "\2\2\u01e7\u01ec\5,\27\2\u01e8\u01e9\7\5\2\2\u01e9\u01eb\5,\27\2\u01ea"+ + "\u01e8\3\2\2\2\u01eb\u01ee\3\2\2\2\u01ec\u01ea\3\2\2\2\u01ec\u01ed\3\2"+ + "\2\2\u01ed\u01ef\3\2\2\2\u01ee\u01ec\3\2\2\2\u01ef\u01f0\7\4\2\2\u01f0"+ + "\u0209\3\2\2\2\u01f1\u01f3\7/\2\2\u01f2\u01f1\3\2\2\2\u01f2\u01f3\3\2"+ + "\2\2\u01f3\u01f4\3\2\2\2\u01f4\u01f5\7%\2\2\u01f5\u01f6\7\3\2\2\u01f6"+ + "\u01f7\5\b\5\2\u01f7\u01f8\7\4\2\2\u01f8\u0209\3\2\2\2\u01f9\u01fb\7/"+ + "\2\2\u01fa\u01f9\3\2\2\2\u01fa\u01fb\3\2\2\2\u01fb\u01fc\3\2\2\2\u01fc"+ + "\u01fd\7*\2\2\u01fd\u0209\58\35\2\u01fe\u0200\7/\2\2\u01ff\u01fe\3\2\2"+ + "\2\u01ff\u0200\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0202\7:\2\2\u0202\u0209"+ + "\5b\62\2\u0203\u0205\7\'\2\2\u0204\u0206\7/\2\2\u0205\u0204\3\2\2\2\u0205"+ + "\u0206\3\2\2\2\u0206\u0207\3\2\2\2\u0207\u0209\7\60\2\2\u0208\u01db\3"+ + "\2\2\2\u0208\u01e3\3\2\2\2\u0208\u01f2\3\2\2\2\u0208\u01fa\3\2\2\2\u0208"+ + "\u01ff\3\2\2\2\u0208\u0203\3\2\2\2\u0209\65\3\2\2\2\u020a\u020b\7*\2\2"+ + "\u020b\u020c\58\35\2\u020c\67\3\2\2\2\u020d\u020f\5b\62\2\u020e\u0210"+ + "\5:\36\2\u020f\u020e\3\2\2\2\u020f\u0210\3\2\2\2\u02109\3\2\2\2\u0211"+ + "\u0212\7\30\2\2\u0212\u0218\5b\62\2\u0213\u0214\7J\2\2\u0214\u0215\5b"+ + "\62\2\u0215\u0216\7Q\2\2\u0216\u0218\3\2\2\2\u0217\u0211\3\2\2\2\u0217"+ + "\u0213\3\2\2\2\u0218;\3\2\2\2\u0219\u021a\b\37\1\2\u021a\u021e\5> \2\u021b"+ + "\u021c\t\n\2\2\u021c\u021e\5<\37\6\u021d\u0219\3\2\2\2\u021d\u021b\3\2"+ + 
"\2\2\u021e\u022b\3\2\2\2\u021f\u0220\f\5\2\2\u0220\u0221\t\13\2\2\u0221"+ + "\u022a\5<\37\6\u0222\u0223\f\4\2\2\u0223\u0224\t\n\2\2\u0224\u022a\5<"+ + "\37\5\u0225\u0226\f\3\2\2\u0226\u0227\5P)\2\u0227\u0228\5<\37\4\u0228"+ + "\u022a\3\2\2\2\u0229\u021f\3\2\2\2\u0229\u0222\3\2\2\2\u0229\u0225\3\2"+ + "\2\2\u022a\u022d\3\2\2\2\u022b\u0229\3\2\2\2\u022b\u022c\3\2\2\2\u022c"+ + "=\3\2\2\2\u022d\u022b\3\2\2\2\u022e\u0244\5@!\2\u022f\u0244\5D#\2\u0230"+ + "\u0244\5N(\2\u0231\u0244\7Z\2\2\u0232\u0233\5V,\2\u0233\u0234\7^\2\2\u0234"+ + "\u0236\3\2\2\2\u0235\u0232\3\2\2\2\u0235\u0236\3\2\2\2\u0236\u0237\3\2"+ + "\2\2\u0237\u0244\7Z\2\2\u0238\u0244\5H%\2\u0239\u023a\7\3\2\2\u023a\u023b"+ + "\5\b\5\2\u023b\u023c\7\4\2\2\u023c\u0244\3\2\2\2\u023d\u0244\5X-\2\u023e"+ + "\u0244\5V,\2\u023f\u0240\7\3\2\2\u0240\u0241\5,\27\2\u0241\u0242\7\4\2"+ + "\2\u0242\u0244\3\2\2\2\u0243\u022e\3\2\2\2\u0243\u022f\3\2\2\2\u0243\u0230"+ + "\3\2\2\2\u0243\u0231\3\2\2\2\u0243\u0235\3\2\2\2\u0243\u0238\3\2\2\2\u0243"+ + "\u0239\3\2\2\2\u0243\u023d\3\2\2\2\u0243\u023e\3\2\2\2\u0243\u023f\3\2"+ + "\2\2\u0244?\3\2\2\2\u0245\u024b\5B\"\2\u0246\u0247\7K\2\2\u0247\u0248"+ + "\5B\"\2\u0248\u0249\7Q\2\2\u0249\u024b\3\2\2\2\u024a\u0245\3\2\2\2\u024a"+ + "\u0246\3\2\2\2\u024bA\3\2\2\2\u024c\u024d\7\20\2\2\u024d\u024e\7\3\2\2"+ + "\u024e\u024f\5,\27\2\u024f\u0250\7\f\2\2\u0250\u0251\5T+\2\u0251\u0252"+ + "\7\4\2\2\u0252C\3\2\2\2\u0253\u0259\5F$\2\u0254\u0255\7K\2\2\u0255\u0256"+ + "\5F$\2\u0256\u0257\7Q\2\2\u0257\u0259\3\2\2\2\u0258\u0253\3\2\2\2\u0258"+ + "\u0254\3\2\2\2\u0259E\3\2\2\2\u025a\u025b\7\34\2\2\u025b\u025c\7\3\2\2"+ + "\u025c\u025d\5X-\2\u025d\u025e\7\37\2\2\u025e\u025f\5<\37\2\u025f\u0260"+ + "\7\4\2\2\u0260G\3\2\2\2\u0261\u0267\5J&\2\u0262\u0263\7K\2\2\u0263\u0264"+ + "\5J&\2\u0264\u0265\7Q\2\2\u0265\u0267\3\2\2\2\u0266\u0261\3\2\2\2\u0266"+ + "\u0262\3\2\2\2\u0267I\3\2\2\2\u0268\u0269\5L\'\2\u0269\u0275\7\3\2\2\u026a"+ + "\u026c\5\36\20\2\u026b\u026a\3\2\2\2\u026b\u026c\3\2\2\2\u026c\u026d\3"+ + "\2\2\2\u026d\u0272\5,\27\2\u026e\u026f\7\5\2\2\u026f\u0271\5,\27\2\u0270"+ + "\u026e\3\2\2\2\u0271\u0274\3\2\2\2\u0272\u0270\3\2\2\2\u0272\u0273\3\2"+ + "\2\2\u0273\u0276\3\2\2\2\u0274\u0272\3\2\2\2\u0275\u026b\3\2\2\2\u0275"+ + "\u0276\3\2\2\2\u0276\u0277\3\2\2\2\u0277\u0278\7\4\2\2\u0278K\3\2\2\2"+ + "\u0279\u027d\7)\2\2\u027a\u027d\79\2\2\u027b\u027d\5X-\2\u027c\u0279\3"+ + "\2\2\2\u027c\u027a\3\2\2\2\u027c\u027b\3\2\2\2\u027dM\3\2\2\2\u027e\u0298"+ + "\7\60\2\2\u027f\u0298\5`\61\2\u0280\u0298\5R*\2\u0281\u0283\7`\2\2\u0282"+ + "\u0281\3\2\2\2\u0283\u0284\3\2\2\2\u0284\u0282\3\2\2\2\u0284\u0285\3\2"+ + "\2\2\u0285\u0298\3\2\2\2\u0286\u0298\7_\2\2\u0287\u0288\7M\2\2\u0288\u0289"+ + "\5b\62\2\u0289\u028a\7Q\2\2\u028a\u0298\3\2\2\2\u028b\u028c\7N\2\2\u028c"+ + "\u028d\5b\62\2\u028d\u028e\7Q\2\2\u028e\u0298\3\2\2\2\u028f\u0290\7O\2"+ + "\2\u0290\u0291\5b\62\2\u0291\u0292\7Q\2\2\u0292\u0298\3\2\2\2\u0293\u0294"+ + "\7P\2\2\u0294\u0295\5b\62\2\u0295\u0296\7Q\2\2\u0296\u0298\3\2\2\2\u0297"+ + "\u027e\3\2\2\2\u0297\u027f\3\2\2\2\u0297\u0280\3\2\2\2\u0297\u0282\3\2"+ + "\2\2\u0297\u0286\3\2\2\2\u0297\u0287\3\2\2\2\u0297\u028b\3\2\2\2\u0297"+ + "\u028f\3\2\2\2\u0297\u0293\3\2\2\2\u0298O\3\2\2\2\u0299\u029a\t\f\2\2"+ + "\u029aQ\3\2\2\2\u029b\u029c\t\r\2\2\u029cS\3\2\2\2\u029d\u029e\5X-\2\u029e"+ + "U\3\2\2\2\u029f\u02a0\5X-\2\u02a0\u02a1\7^\2\2\u02a1\u02a3\3\2\2\2\u02a2"+ + "\u029f\3\2\2\2\u02a3\u02a6\3\2\2\2\u02a4\u02a2\3\2\2\2\u02a4\u02a5\3\2"+ + "\2\2\u02a5\u02a7\3\2\2\2\u02a6\u02a4\3\2\2\2\u02a7\u02a8\5X-\2\u02a8W"+ + 
"\3\2\2\2\u02a9\u02ac\5\\/\2\u02aa\u02ac\5^\60\2\u02ab\u02a9\3\2\2\2\u02ab"+ + "\u02aa\3\2\2\2\u02acY\3\2\2\2\u02ad\u02ae\5X-\2\u02ae\u02af\7\6\2\2\u02af"+ + "\u02b1\3\2\2\2\u02b0\u02ad\3\2\2\2\u02b0\u02b1\3\2\2\2\u02b1\u02b2\3\2"+ + "\2\2\u02b2\u02ba\7e\2\2\u02b3\u02b4\5X-\2\u02b4\u02b5\7\6\2\2\u02b5\u02b7"+ + "\3\2\2\2\u02b6\u02b3\3\2\2\2\u02b6\u02b7\3\2\2\2\u02b7\u02b8\3\2\2\2\u02b8"+ + "\u02ba\5X-\2\u02b9\u02b0\3\2\2\2\u02b9\u02b6\3\2\2\2\u02ba[\3\2\2\2\u02bb"+ + "\u02be\7f\2\2\u02bc\u02be\7g\2\2\u02bd\u02bb\3\2\2\2\u02bd\u02bc\3\2\2"+ + "\2\u02be]\3\2\2\2\u02bf\u02c3\7c\2\2\u02c0\u02c3\5d\63\2\u02c1\u02c3\7"+ + "d\2\2\u02c2\u02bf\3\2\2\2\u02c2\u02c0\3\2\2\2\u02c2\u02c1\3\2\2\2\u02c3"+ + "_\3\2\2\2\u02c4\u02c6\t\n\2\2\u02c5\u02c4\3\2\2\2\u02c5\u02c6\3\2\2\2"+ + "\u02c6\u02c7\3\2\2\2\u02c7\u02cd\7b\2\2\u02c8\u02ca\t\n\2\2\u02c9\u02c8"+ + "\3\2\2\2\u02c9\u02ca\3\2\2\2\u02ca\u02cb\3\2\2\2\u02cb\u02cd\7a\2\2\u02cc"+ + "\u02c5\3\2\2\2\u02cc\u02c9\3\2\2\2\u02cda\3\2\2\2\u02ce\u02cf\t\16\2\2"+ + "\u02cfc\3\2\2\2\u02d0\u02d1\t\17\2\2\u02d1e\3\2\2\2buw{\u0084\u0086\u008a"+ + "\u0091\u0098\u009d\u00a2\u00ac\u00b0\u00b8\u00bb\u00c1\u00c6\u00c9\u00d0"+ + "\u00d8\u00db\u00e7\u00ea\u00ed\u00f4\u00fb\u00ff\u0103\u010a\u010e\u0112"+ + "\u0117\u011b\u0123\u0127\u012e\u0139\u013c\u0140\u014c\u014f\u0155\u015c"+ + "\u0163\u0166\u016a\u016e\u0172\u0174\u017f\u0184\u0188\u018b\u0191\u0194"+ + "\u019a\u019d\u019f\u01c2\u01ca\u01cc\u01d3\u01d8\u01db\u01e3\u01ec\u01f2"+ + "\u01fa\u01ff\u0205\u0208\u020f\u0217\u021d\u0229\u022b\u0235\u0243\u024a"+ + "\u0258\u0266\u026b\u0272\u0275\u027c\u0284\u0297\u02a4\u02ab\u02b0\u02b6"+ + "\u02b9\u02bd\u02c2\u02c5\u02c9\u02cc"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java index 2c28b18cdf2ee..3b1b730e81bb7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java @@ -294,6 +294,12 @@ interface SqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#matchQueryOptions}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitMatchQueryOptions(SqlBaseParser.MatchQueryOptionsContext ctx); /** * Visit a parse tree produced by {@link SqlBaseParser#predicated}. * @param ctx the parse tree diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtilsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtilsTests.java new file mode 100644 index 0000000000000..7dd08e9c34cef --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtilsTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Map; + +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; + +public class FullTextUtilsTests extends ESTestCase { + + public void testColonDelimited() { + Map options = FullTextUtils.parseSettings("k1=v1;k2=v2", new Location(1, 1)); + assertThat(options.size(), is(2)); + assertThat(options, hasEntry("k1", "v1")); + assertThat(options, hasEntry("k2", "v2")); + } + + public void testColonDelimitedErrorString() { + ParsingException e = expectThrows(ParsingException.class, + () -> FullTextUtils.parseSettings("k1=v1;k2v2", new Location(1, 1))); + assertThat(e.getMessage(), is("line 1:3: Cannot parse entry k2v2 in options k1=v1;k2v2")); + assertThat(e.getLineNumber(), is(1)); + assertThat(e.getColumnNumber(), is(3)); + } + + public void testColonDelimitedErrorDuplicate() { + ParsingException e = expectThrows(ParsingException.class, + () -> FullTextUtils.parseSettings("k1=v1;k1=v2", new Location(1, 1))); + assertThat(e.getMessage(), is("line 1:3: Duplicate option k1=v2 detected in options k1=v1;k1=v2")); + assertThat(e.getLineNumber(), is(1)); + assertThat(e.getColumnNumber(), is(3)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java index de9c6c56da099..3e7e562e599d0 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java @@ -11,6 +11,10 @@ import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; import org.elasticsearch.xpack.sql.expression.UnresolvedStar; import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.plan.logical.Filter; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.OrderBy; import org.elasticsearch.xpack.sql.plan.logical.Project; @@ -19,6 +23,7 @@ import java.util.List; import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -92,6 +97,45 @@ public void testOrderByTwo() { assertEquals("baz", a.name()); } + public void testStringQuery() { + LogicalPlan plan = + parseStatement("SELECT * FROM FOO WHERE " + + "QUERY('foo', 'default_field=last_name;lenient=true', 'fuzzy_rewrite=scoring_boolean')"); + + StringQueryPredicate sqp = (StringQueryPredicate) ((Filter) plan.children().get(0).children().get(0)).condition(); + assertEquals("foo", sqp.query()); + assertEquals(3, sqp.optionMap().size()); + assertThat(sqp.optionMap(), hasEntry("default_field", "last_name")); + assertThat(sqp.optionMap(), hasEntry("lenient", "true")); + assertThat(sqp.optionMap(), hasEntry("fuzzy_rewrite", "scoring_boolean")); + } + + public void testMatchQuery() { + LogicalPlan plan = parseStatement("SELECT * FROM FOO WHERE " + + 
"MATCH(first_name, 'foo', 'operator=AND;lenient=true', 'fuzzy_rewrite=scoring_boolean')"); + + MatchQueryPredicate mqp = (MatchQueryPredicate) ((Filter) plan.children().get(0).children().get(0)).condition(); + assertEquals("foo", mqp.query()); + assertEquals("?first_name", mqp.field().toString()); + assertEquals(3, mqp.optionMap().size()); + assertThat(mqp.optionMap(), hasEntry("operator", "AND")); + assertThat(mqp.optionMap(), hasEntry("lenient", "true")); + assertThat(mqp.optionMap(), hasEntry("fuzzy_rewrite", "scoring_boolean")); + } + + public void testMultiMatchQuery() { + LogicalPlan plan = parseStatement("SELECT * FROM FOO WHERE " + + "MATCH('first_name,last_name', 'foo', 'operator=AND;type=best_fields', 'fuzzy_rewrite=scoring_boolean')"); + + MultiMatchQueryPredicate mmqp = (MultiMatchQueryPredicate) ((Filter) plan.children().get(0).children().get(0)).condition(); + assertEquals("foo", mmqp.query()); + assertEquals("first_name,last_name", mmqp.fieldString()); + assertEquals(3, mmqp.optionMap().size()); + assertThat(mmqp.optionMap(), hasEntry("operator", "AND")); + assertThat(mmqp.optionMap(), hasEntry("type", "best_fields")); + assertThat(mmqp.optionMap(), hasEntry("fuzzy_rewrite", "scoring_boolean")); + } + private LogicalPlan parseStatement(String sql) { return new SqlParser().createStatement(sql); } @@ -132,4 +176,4 @@ private String stringForDirection(Order.OrderDirection dir) { String dirStr = dir.toString(); return randomBoolean() && dirStr.equals("ASC") ? "" : " " + dirStr; } -} \ No newline at end of file +} diff --git a/x-pack/qa/sql/src/main/resources/fulltext.csv-spec b/x-pack/qa/sql/src/main/resources/fulltext.csv-spec index 5c032917ff153..93493ffdc2acb 100644 --- a/x-pack/qa/sql/src/main/resources/fulltext.csv-spec +++ b/x-pack/qa/sql/src/main/resources/fulltext.csv-spec @@ -23,6 +23,13 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', ' 10096 |Jayson |M |Mandell ; +simpleQueryOptionsInMultipleCommaSeparatedStrings +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', 'default_field=last_name;lenient=true', 'fuzzy_rewrite=scoring_boolean') LIMIT 5; + + emp_no:i | first_name:s | gender:s | last_name:s +10096 |Jayson |M |Mandell +; + matchQuery SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez'); @@ -37,6 +44,13 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_nam 10076 |Erez |F |Ritzmann ; +matchQueryWithOptionsInMultipleCommaSeparatedStrings +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;cutoff_frequency=2','fuzzy_rewrite=scoring_boolean;minimum_should_match=1','operator=AND', 'max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); + + emp_no:i | first_name:s | gender:s | last_name:s +10076 |Erez |F |Ritzmann +; + multiMatchQuery SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'type=best_fields;operator=OR'); @@ -51,6 +65,13 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_na 10095 |Hilari |M |Morton ; +multiMatchQueryWithInMultipleCommaSeparatedStrings +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true', 
'cutoff_frequency=2','tie_breaker=0.1;use_dis_max=true;fuzzy_rewrite=scoring_boolean','minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); + + emp_no:i | first_name:s | gender:s | last_name:s +10095 |Hilari |M |Morton +; + score SELECT emp_no, first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'Erez') ORDER BY SCORE(); From 251489d59a3087819ff3918df316ac145c5889b2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 19 Sep 2018 10:18:03 +0200 Subject: [PATCH 40/46] Cut over to unwrap segment reader (#33843) The fix in #33757 introduced a workaround since FilterCodecReader didn't support unwrapping. This cuts over to a more elegant fix to access the reader's segment infos. --- .../elasticsearch/common/lucene/Lucene.java | 4 + .../snapshots/SourceOnlySnapshot.java | 83 +++---------------- 2 files changed, 17 insertions(+), 70 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index dc8628f184e43..ea363e884613b 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -30,6 +30,7 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterCodecReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexCommit; @@ -726,6 +727,9 @@ public static SegmentReader segmentReader(LeafReader reader) { } else if (reader instanceof FilterLeafReader) { final FilterLeafReader fReader = (FilterLeafReader) reader; return segmentReader(FilterLeafReader.unwrap(fReader)); + } else if (reader instanceof FilterCodecReader) { + final FilterCodecReader fReader = (FilterCodecReader) reader; + return segmentReader(FilterCodecReader.unwrap(fReader)); + } // hard fail - we can't get a SegmentReader throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java index 6c38a25f69a58..fa7bccf8b08c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java @@ -81,31 +81,21 @@ public synchronized List<String> syncSnapshot(IndexCommit commit) throws IOExcep String segmentFileName; try (Lock writeLock = targetDirectory.obtainLock(IndexWriter.WRITE_LOCK_NAME); StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(commit)) { - SegmentInfos segmentInfos = reader.getSegmentInfos(); + SegmentInfos segmentInfos = reader.getSegmentInfos().clone(); + DirectoryReader wrappedReader = wrapReader(reader); List<SegmentCommitInfo> newInfos = new ArrayList<>(); - for (LeafReaderContext ctx : reader.leaves()) { + for (LeafReaderContext ctx : wrappedReader.leaves()) { LeafReader leafReader = ctx.reader(); - SegmentCommitInfo info = reader.getSegmentInfos().info(ctx.ord); - assert info.info.equals(Lucene.segmentReader(ctx.reader()).getSegmentInfo().info); - /* We could do this totally different without wrapping this dummy directory reader
if FilterCodecReader would have a - * getDelegate method. This is fixed in LUCENE-8502 but we need to wait for it to come in 7.5.1 or 7.6. - * The reason here is that the ctx.ord is not guaranteed to be equivalent to the SegmentCommitInfo ord in the SegmentInfo - * object since we might drop fully deleted segments. if that happens we are using the wrong reader for the SI and - * might almost certainly expose deleted documents. - */ - DirectoryReader wrappedReader = wrapReader(new DummyDirectoryReader(reader.directory(), leafReader)); - if (wrappedReader.leaves().isEmpty() == false) { - leafReader = wrappedReader.leaves().get(0).reader(); - LiveDocs liveDocs = getLiveDocs(leafReader); - if (leafReader.numDocs() != 0) { // fully deleted segments don't need to be processed - SegmentCommitInfo newInfo = syncSegment(info, liveDocs, leafReader.getFieldInfos(), existingSegments, createdFiles); - newInfos.add(newInfo); - } + SegmentCommitInfo info = Lucene.segmentReader(leafReader).getSegmentInfo(); + LiveDocs liveDocs = getLiveDocs(leafReader); + if (leafReader.numDocs() != 0) { // fully deleted segments don't need to be processed + SegmentCommitInfo newInfo = syncSegment(info, liveDocs, leafReader.getFieldInfos(), existingSegments, createdFiles); + newInfos.add(newInfo); } } segmentInfos.clear(); segmentInfos.addAll(newInfos); - segmentInfos.setNextWriteGeneration(Math.max(segmentInfos.getGeneration(), generation)+1); + segmentInfos.setNextWriteGeneration(Math.max(segmentInfos.getGeneration(), generation) + 1); String pendingSegmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", segmentInfos.getGeneration()); try (IndexOutput segnOutput = targetDirectory.createOutput(pendingSegmentFileName, IOContext.DEFAULT)) { @@ -207,9 +197,9 @@ private SegmentCommitInfo syncSegment(SegmentCommitInfo segmentCommitInfo, LiveD newInfo = new SegmentCommitInfo(newSegmentInfo, 0, 0, -1, -1, -1); List fieldInfoCopy = new ArrayList<>(fieldInfos.size()); for (FieldInfo fieldInfo : fieldInfos) { - fieldInfoCopy.add(new FieldInfo(fieldInfo.name, fieldInfo.number, - false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, - fieldInfo.isSoftDeletesField())); + fieldInfoCopy.add(new FieldInfo(fieldInfo.name, fieldInfo.number, + false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, + fieldInfo.isSoftDeletesField())); } FieldInfos newFieldInfos = new FieldInfos(fieldInfoCopy.toArray(new FieldInfo[0])); codec.fieldInfosFormat().write(trackingDir, newSegmentInfo, segmentSuffix, newFieldInfos, IOContext.DEFAULT); @@ -250,7 +240,7 @@ private SegmentCommitInfo syncSegment(SegmentCommitInfo segmentCommitInfo, LiveD private boolean assertLiveDocs(Bits liveDocs, int deletes) { int actualDeletes = 0; - for (int i = 0; i < liveDocs.length(); i++ ) { + for (int i = 0; i < liveDocs.length(); i++) { if (liveDocs.get(i) == false) { actualDeletes++; } @@ -268,51 +258,4 @@ private static class LiveDocs { this.bits = bits; } } - - private static class DummyDirectoryReader extends DirectoryReader { - - protected DummyDirectoryReader(Directory directory, LeafReader... 
segmentReaders) throws IOException { - super(directory, segmentReaders); - } - - @Override - protected DirectoryReader doOpenIfChanged() throws IOException { - return null; - } - - @Override - protected DirectoryReader doOpenIfChanged(IndexCommit commit) throws IOException { - return null; - } - - @Override - protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws IOException { - return null; - } - - @Override - public long getVersion() { - return 0; - } - - @Override - public boolean isCurrent() throws IOException { - return false; - } - - @Override - public IndexCommit getIndexCommit() throws IOException { - return null; - } - - @Override - protected void doClose() throws IOException { - - } - - @Override - public CacheHelper getReaderCacheHelper() { - return null; - } - } } From a3e8b831ee3c83ccbecc28734d40a55d5cbddfec Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Wed, 19 Sep 2018 10:28:22 +0200 Subject: [PATCH 41/46] add elasticsearch-shard tool (#32281) Relates #31389 --- distribution/src/bin/elasticsearch-shard | 5 + distribution/src/bin/elasticsearch-shard.bat | 12 + docs/reference/commands/index.asciidoc | 2 + docs/reference/commands/shard-tool.asciidoc | 107 +++ .../reference/index-modules/translog.asciidoc | 4 + .../java/org/elasticsearch/cli/Terminal.java | 7 +- .../packaging/test/ArchiveTestCase.java | 17 + .../packaging/util/Archives.java | 1 + .../packaging/util/Installation.java | 3 +- .../packaging/util/Packages.java | 1 + .../resources/packaging/utils/packages.bash | 1 + .../test/resources/packaging/utils/tar.bash | 1 + .../elasticsearch/env/NodeEnvironment.java | 166 +++-- .../RemoveCorruptedLuceneSegmentsAction.java | 100 +++ .../RemoveCorruptedShardDataCommand.java | 545 +++++++++++++++ .../elasticsearch/index/shard/ShardPath.java | 25 +- .../index/shard/ShardToolCli.java | 39 ++ .../index/translog/TranslogHeader.java | 3 +- .../index/translog/TranslogToolCli.java | 5 +- .../translog/TruncateTranslogAction.java | 245 +++++++ .../translog/TruncateTranslogCommand.java | 254 ------- .../index/shard/IndexShardTests.java | 20 +- .../RemoveCorruptedShardDataCommandIT.java | 652 ++++++++++++++++++ .../RemoveCorruptedShardDataCommandTests.java | 409 +++++++++++ .../index/store/CorruptedTranslogIT.java | 2 +- .../index/translog/TestTranslog.java | 11 +- .../index/translog/TruncateTranslogIT.java | 382 ---------- .../index/shard/IndexShardTestCase.java | 9 +- .../elasticsearch/test/CorruptionUtils.java | 18 + 29 files changed, 2326 insertions(+), 720 deletions(-) create mode 100755 distribution/src/bin/elasticsearch-shard create mode 100644 distribution/src/bin/elasticsearch-shard.bat create mode 100644 docs/reference/commands/shard-tool.asciidoc create mode 100644 server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java create mode 100644 server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java create mode 100644 server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java create mode 100644 server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java delete mode 100644 server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java create mode 100644 server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java create mode 100644 server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java delete mode 100644 
server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java diff --git a/distribution/src/bin/elasticsearch-shard b/distribution/src/bin/elasticsearch-shard new file mode 100755 index 0000000000000..4c14a0434175b --- /dev/null +++ b/distribution/src/bin/elasticsearch-shard @@ -0,0 +1,5 @@ +#!/bin/bash + +ES_MAIN_CLASS=org.elasticsearch.index.shard.ShardToolCli \ + "`dirname "$0"`"/elasticsearch-cli \ + "$@" diff --git a/distribution/src/bin/elasticsearch-shard.bat b/distribution/src/bin/elasticsearch-shard.bat new file mode 100644 index 0000000000000..e861b197e873d --- /dev/null +++ b/distribution/src/bin/elasticsearch-shard.bat @@ -0,0 +1,12 @@ +@echo off + +setlocal enabledelayedexpansion +setlocal enableextensions + +set ES_MAIN_CLASS=org.elasticsearch.index.shard.ShardToolCli +call "%~dp0elasticsearch-cli.bat" ^ + %%* ^ + || exit /b 1 + +endlocal +endlocal diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 134ac1edbd017..8f4d178a99296 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -12,6 +12,7 @@ tasks from the command line: * <> * <> * <> +* <> * <> * <> @@ -22,5 +23,6 @@ include::certutil.asciidoc[] include::migrate-tool.asciidoc[] include::saml-metadata.asciidoc[] include::setup-passwords.asciidoc[] +include::shard-tool.asciidoc[] include::syskeygen.asciidoc[] include::users-command.asciidoc[] diff --git a/docs/reference/commands/shard-tool.asciidoc b/docs/reference/commands/shard-tool.asciidoc new file mode 100644 index 0000000000000..6fca1355a27be --- /dev/null +++ b/docs/reference/commands/shard-tool.asciidoc @@ -0,0 +1,107 @@ +[[shard-tool]] +== elasticsearch-shard + +In some cases the Lucene index or translog of a shard copy can become +corrupted. The `elasticsearch-shard` command enables you to remove corrupted +parts of the shard if a good copy of the shard cannot be recovered +automatically or restored from backup. + +[WARNING] +You will lose the corrupted data when you run `elasticsearch-shard`. This tool +should only be used as a last resort if there is no way to recover from another +copy of the shard or restore a snapshot. + +When Elasticsearch detects that a shard's data is corrupted, it fails that +shard copy and refuses to use it. Under normal conditions, the shard is +automatically recovered from another copy. If no good copy of the shard is +available and you cannot restore from backup, you can use `elasticsearch-shard` +to remove the corrupted data and restore access to any remaining data in +unaffected segments. + +[WARNING] +Stop Elasticsearch before running `elasticsearch-shard`. + +To remove corrupted shard data use the `remove-corrupted-data` subcommand. + +There are two ways to specify the path: + +* Specify the index name and shard name with the `--index` and `--shard-id` + options. +* Use the `--dir` option to specify the full path to the corrupted index or + translog files. + +[float] +=== Removing corrupted data + +`elasticsearch-shard` analyses the shard copy and provides an overview of the +corruption found. To proceed you must then confirm that you want to remove the +corrupted data. + +[WARNING] +Back up your data before running `elasticsearch-shard`. This is a destructive +operation that removes corrupted data from the shard. 
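+
+As a purely illustrative alternative (the path shown here is the sample data path
+from the example output below, not one to copy verbatim), the same shard can also
+be addressed by its on-disk location using the `--dir` option:
+
+[source,txt]
+--------------------------------------------------
+$ bin/elasticsearch-shard remove-corrupted-data --dir /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
+--------------------------------------------------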
+ +[source,txt] +-------------------------------------------------- +$ bin/elasticsearch-shard remove-corrupted-data --index twitter --shard-id 0 + + + WARNING: Elasticsearch MUST be stopped before running this tool. + + Please make a complete backup of your index before using this tool. + + +Opening Lucene index at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ + + >> Lucene index is corrupted at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ + +Opening translog at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/ + + + >> Translog is clean at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/ + + + Corrupted Lucene index segments found - 32 documents will be lost. + + WARNING: YOU WILL LOSE DATA. + +Continue and remove docs from the index ? Y + +WARNING: 1 broken segments (containing 32 documents) detected +Took 0.056 sec total. +Writing... +OK +Wrote new segments file "segments_c" +Marking index with the new history uuid : 0pIBd9VTSOeMfzYT6p0AsA +Changing allocation id V8QXk-QXSZinZMT-NvEq4w to tjm9Ve6uTBewVFAlfUMWjA + +You should run the following command to allocate this shard: + +POST /_cluster/reroute +{ + "commands" : [ + { + "allocate_stale_primary" : { + "index" : "index42", + "shard" : 0, + "node" : "II47uXW2QvqzHBnMcl2o_Q", + "accept_data_loss" : false + } + } + ] +} + +You must accept the possibility of data loss by changing parameter `accept_data_loss` to `true`. + +Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ + +-------------------------------------------------- + +When you use `elasticsearch-shard` to drop the corrupted data, the shard's +allocation ID changes. After restarting the node, you must use the +<<cluster-reroute,cluster reroute API>> to tell Elasticsearch to use the new +ID. The `elasticsearch-shard` command shows the request that +you need to submit. + +You can also use the `-h` option to get a list of all options and parameters +that the `elasticsearch-shard` tool supports. diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index bed19bd5be1df..713a352210054 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -92,6 +92,10 @@ The maximum duration for which translog files will be kept. Defaults to `12h`. [[corrupt-translog-truncation]] === What to do if the translog becomes corrupted? +[WARNING] +This tool is deprecated and will be completely removed in 7.0. +Use the <<shard-tool,`elasticsearch-shard` tool>> instead of this one. + In some cases (a bad drive, user error) the translog on a shard copy can become corrupted. When this corruption is detected by Elasticsearch due to mismatching checksums, Elasticsearch will fail that shard copy and refuse to use that copy diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java index d9923def6ca0a..a0ebff5d67041 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java @@ -85,12 +85,17 @@ public final void println(Verbosity verbosity, String msg) { /** Prints message to the terminal at {@code verbosity} level, without a newline.
*/ public final void print(Verbosity verbosity, String msg) { - if (this.verbosity.ordinal() >= verbosity.ordinal()) { + if (isPrintable(verbosity)) { getWriter().print(msg); getWriter().flush(); } } + /** Checks whether the given {@code verbosity} level is high enough to be printed */ + public final boolean isPrintable(Verbosity verbosity) { + return this.verbosity.ordinal() >= verbosity.ordinal(); + } + /** * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n' * (or the default empty value) is entered. diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index 83edc8a0a9390..0108f88ecd166 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -325,4 +325,21 @@ public void test90SecurityCliPackaging() { } } + public void test100RepairIndexCliPackaging() { + assumeThat(installation, is(notNullValue())); + + final Installation.Executables bin = installation.executables(); + final Shell sh = new Shell(); + + Platforms.PlatformAction action = () -> { + final Result result = sh.run(bin.elasticsearchShard + " help"); + assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); + }; + + if (distribution().equals(Distribution.DEFAULT_TAR) || distribution().equals(Distribution.DEFAULT_ZIP)) { + Platforms.onLinux(action); + Platforms.onWindows(action); + } + } + } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java index 9e9a453ca8422..45629f286fcc4 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -186,6 +186,7 @@ private static void verifyOssInstallation(Installation es, Distribution distribu "elasticsearch-env", "elasticsearch-keystore", "elasticsearch-plugin", + "elasticsearch-shard", "elasticsearch-translog" ).forEach(executable -> { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java index 8bc3fc6e14d3b..620ccd5e442d7 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java @@ -100,8 +100,9 @@ public class Executables { public final Path elasticsearch = platformExecutable("elasticsearch"); public final Path elasticsearchPlugin = platformExecutable("elasticsearch-plugin"); public final Path elasticsearchKeystore = platformExecutable("elasticsearch-keystore"); - public final Path elasticsearchTranslog = platformExecutable("elasticsearch-translog"); public final Path elasticsearchCertutil = platformExecutable("elasticsearch-certutil"); + public final Path elasticsearchShard = platformExecutable("elasticsearch-shard"); + public final Path elasticsearchTranslog = platformExecutable("elasticsearch-translog"); private Path platformExecutable(String name) { final String platformExecutableName = Platforms.WINDOWS diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index be7edc5e8f9e4..56de822316634 100644 ---
a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -187,6 +187,7 @@ private static void verifyOssInstallation(Installation es, Distribution distribu "elasticsearch", "elasticsearch-plugin", "elasticsearch-keystore", + "elasticsearch-shard", "elasticsearch-translog" ).forEach(executable -> assertThat(es.bin(executable), file(File, "root", "root", p755))); diff --git a/qa/vagrant/src/test/resources/packaging/utils/packages.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash index 57f1ebd1c6106..f6ba68d84d483 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/packages.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/packages.bash @@ -95,6 +95,7 @@ verify_package_installation() { assert_file "$ESHOME/bin" d root root 755 assert_file "$ESHOME/bin/elasticsearch" f root root 755 assert_file "$ESHOME/bin/elasticsearch-plugin" f root root 755 + assert_file "$ESHOME/bin/elasticsearch-shard" f root root 755 assert_file "$ESHOME/bin/elasticsearch-translog" f root root 755 assert_file "$ESHOME/lib" d root root 755 assert_file "$ESCONFIG" d root elasticsearch 2750 diff --git a/qa/vagrant/src/test/resources/packaging/utils/tar.bash b/qa/vagrant/src/test/resources/packaging/utils/tar.bash index 4ded1f73514b2..23901cbae99b7 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/tar.bash @@ -94,6 +94,7 @@ verify_archive_installation() { assert_file "$ESHOME/bin/elasticsearch-env" f elasticsearch elasticsearch 755 assert_file "$ESHOME/bin/elasticsearch-keystore" f elasticsearch elasticsearch 755 assert_file "$ESHOME/bin/elasticsearch-plugin" f elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin/elasticsearch-shard" f elasticsearch elasticsearch 755 assert_file "$ESHOME/bin/elasticsearch-translog" f elasticsearch elasticsearch 755 assert_file "$ESCONFIG" d elasticsearch elasticsearch 755 assert_file "$ESCONFIG/elasticsearch.yml" f elasticsearch elasticsearch 660 diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 29d3207c73ac2..538a8edd995ae 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -30,6 +30,8 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -75,6 +77,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static java.util.Collections.unmodifiableSet; @@ -171,6 +174,63 @@ public String toString() { public static final String INDICES_FOLDER = "indices"; public static final String NODE_LOCK_FILENAME = "node.lock"; + public static class NodeLock implements Releasable { + + private final int nodeId; + private final Lock[] locks; + private final NodePath[] nodePaths; + + /** + * Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to 
acquire it + * @param pathFunction function to check node path before attempt of acquiring a node lock + */ + public NodeLock(final int nodeId, final Logger logger, + final Environment environment, + final CheckedFunction pathFunction) throws IOException { + this.nodeId = nodeId; + nodePaths = new NodePath[environment.dataWithClusterFiles().length]; + locks = new Lock[nodePaths.length]; + try { + final Path[] dataPaths = environment.dataFiles(); + for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) { + Path dataDir = dataPaths[dirIndex]; + Path dir = resolveNodePath(dataDir, nodeId); + if (pathFunction.apply(dir) == false) { + continue; + } + try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { + logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); + locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME); + nodePaths[dirIndex] = new NodePath(dir); + } catch (IOException e) { + logger.trace(() -> new ParameterizedMessage( + "failed to obtain node lock on {}", dir.toAbsolutePath()), e); + // release all the ones that were obtained up until now + throw (e instanceof LockObtainFailedException ? e + : new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e)); + } + } + } catch (IOException e) { + close(); + throw e; + } + } + + public NodePath[] getNodePaths() { + return nodePaths; + } + + @Override + public void close() { + for (int i = 0; i < locks.length; i++) { + if (locks[i] != null) { + IOUtils.closeWhileHandlingException(locks[i]); + } + locks[i] = null; + } + } + } + /** * Setup the environment. * @param settings settings from elasticsearch.yml @@ -188,51 +248,39 @@ public NodeEnvironment(Settings settings, Environment environment, Consumer new ParameterizedMessage( - "failed to obtain node lock on {}", dir.toAbsolutePath()), e); - lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e); - // release all the ones that were obtained up until now - releaseAndNullLocks(locks); - break; - } - } - if (locks[0] != null) { - // we found a lock, break + final AtomicReference onCreateDirectoriesException = new AtomicReference<>(); + for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) { + try { + nodeLock = new NodeLock(possibleLockId, logger, environment, + dir -> { + try { + Files.createDirectories(dir); + } catch (IOException e) { + onCreateDirectoriesException.set(e); + throw e; + } + return true; + }); break; + } catch (LockObtainFailedException e) { + // ignore any LockObtainFailedException + } catch (IOException e) { + if (onCreateDirectoriesException.get() != null) { + throw onCreateDirectoriesException.get(); + } + lastException = e; } } - if (locks[0] == null) { + if (nodeLock == null) { final String message = String.format( Locale.ROOT, "failed to obtain node locks, tried [%s] with lock id%s;" + @@ -243,13 +291,12 @@ public NodeEnvironment(Settings settings, Environment environment, Consumer getCleanStatus(ShardPath shardPath, + Directory indexDirectory, + Lock writeLock, + PrintStream printStream, + boolean verbose) throws IOException { + if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(indexDirectory) == false) { + return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null); + } + + final CheckIndex.Status status; + try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) { + checker.setChecksumsOnly(true); + checker.setInfoStream(printStream, verbose); + + status = checker.checkIndex(null); + + if 
(status.missingSegments) { + return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.UNRECOVERABLE, + "Index is unrecoverable - there are missing segments"); + } + + return status.clean + ? Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN_WITH_CORRUPTED_MARKER, null) + : Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CORRUPTED, + "Corrupted Lucene index segments found - " + status.totLoseDocCount + " documents will be lost."); + } + } + + public void execute(Terminal terminal, + ShardPath shardPath, + Directory indexDirectory, + Lock writeLock, + PrintStream printStream, + boolean verbose) throws IOException { + checkCorruptMarkerFileIsPresent(indexDirectory); + + final CheckIndex.Status status; + try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) { + + checker.setChecksumsOnly(true); + checker.setInfoStream(printStream, verbose); + + status = checker.checkIndex(null); + + if (status.missingSegments == false) { + if (status.clean == false) { + terminal.println("Writing..."); + checker.exorciseIndex(status); + + terminal.println("OK"); + terminal.println("Wrote new segments file \"" + status.segmentsFileName + "\""); + } + } else { + throw new ElasticsearchException("Index is unrecoverable - there are missing segments"); + } + } + } + + protected void checkCorruptMarkerFileIsPresent(Directory directory) throws IOException { + if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(directory) == false) { + throw new ElasticsearchException("There is no corruption file marker"); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java new file mode 100644 index 0000000000000..de22903efb334 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -0,0 +1,545 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.store.NativeFSLockFactory; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; +import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; +import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.TruncateTranslogAction; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public class RemoveCorruptedShardDataCommand extends EnvironmentAwareCommand { + + private static final Logger logger = Loggers.getLogger(RemoveCorruptedShardDataCommand.class); + + private final OptionSpec folderOption; + private final OptionSpec indexNameOption; + private final OptionSpec shardIdOption; + + private final RemoveCorruptedLuceneSegmentsAction removeCorruptedLuceneSegmentsAction; + private final TruncateTranslogAction truncateTranslogAction; + private final NamedXContentRegistry namedXContentRegistry; + + public RemoveCorruptedShardDataCommand() { + this(false); + } + + public RemoveCorruptedShardDataCommand(boolean translogOnly) { + super("Removes corrupted shard files"); + + folderOption = parser.acceptsAll(Arrays.asList("d", "dir"), + "Index directory location on disk") + .withRequiredArg(); + + indexNameOption = parser.accepts("index", "Index name") + .withRequiredArg(); + + shardIdOption = parser.accepts("shard-id", "Shard id") + .withRequiredArg() + .ofType(Integer.class); + + namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); 
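+        // note (descriptive, not part of the original commit): this registry supplies the parsers that
+        // IndexMetaData.FORMAT.loadLatestState(...) relies on when this command resolves shard paths below,
+        // and it is also handed to the TruncateTranslogAction constructed next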
+ + removeCorruptedLuceneSegmentsAction = translogOnly ? null : new RemoveCorruptedLuceneSegmentsAction(); + truncateTranslogAction = new TruncateTranslogAction(namedXContentRegistry); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + if (removeCorruptedLuceneSegmentsAction == null) { + // that's only for 6.x branch for bwc with elasticsearch-translog + terminal.println("This tool truncates the translog and translog checkpoint files to create a new translog"); + } else { + terminal.println("This tool attempts to detect and remove unrecoverable corrupted data in a shard."); + } + } + + // Visible for testing + public OptionParser getParser() { + return this.parser; + } + + @SuppressForbidden(reason = "Necessary to use the path passed in") + protected Path getPath(String dirValue) { + return PathUtils.get(dirValue, "", ""); + } + + protected void findAndProcessShardPath(OptionSet options, Environment environment, CheckedConsumer consumer) + throws IOException { + final Settings settings = environment.settings(); + + final String indexName; + final int shardId; + final int fromNodeId; + final int toNodeId; + + if (options.has(folderOption)) { + final Path path = getPath(folderOption.value(options)).getParent(); + final Path shardParent = path.getParent(); + final Path shardParentParent = shardParent.getParent(); + final Path indexPath = path.resolve(ShardPath.INDEX_FOLDER_NAME); + if (Files.exists(indexPath) == false || Files.isDirectory(indexPath) == false) { + throw new ElasticsearchException("index directory [" + indexPath + "], must exist and be a directory"); + } + + final IndexMetaData indexMetaData = + IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardParent); + + final String shardIdFileName = path.getFileName().toString(); + final String nodeIdFileName = shardParentParent.getParent().getFileName().toString(); + if (Files.isDirectory(path) && shardIdFileName.chars().allMatch(Character::isDigit) // SHARD-ID path element check + && NodeEnvironment.INDICES_FOLDER.equals(shardParentParent.getFileName().toString()) // `indices` check + && nodeIdFileName.chars().allMatch(Character::isDigit) // NODE-ID check + && NodeEnvironment.NODES_FOLDER.equals(shardParentParent.getParent().getParent().getFileName().toString()) // `nodes` check + ) { + shardId = Integer.parseInt(shardIdFileName); + indexName = indexMetaData.getIndex().getName(); + fromNodeId = Integer.parseInt(nodeIdFileName); + toNodeId = fromNodeId + 1; + } else { + throw new ElasticsearchException("Unable to resolve shard id. 
+        } else {
+            // otherwise resolve shardPath based on the index name and shard id
+            indexName = Objects.requireNonNull(indexNameOption.value(options), "Index name is required");
+            shardId = Objects.requireNonNull(shardIdOption.value(options), "Shard ID is required");
+
+            // resolve shard path in case of multi-node layout per environment
+            fromNodeId = 0;
+            toNodeId = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
+        }
+
+        // have to iterate over possible lock ids like NodeEnvironment does; in contrast to it, we have to fail if the node is busy
+        for (int possibleLockId = fromNodeId; possibleLockId < toNodeId; possibleLockId++) {
+            try {
+                try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(possibleLockId, logger, environment, Files::exists)) {
+                    final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths();
+                    for (NodeEnvironment.NodePath nodePath : nodePaths) {
+                        if (Files.exists(nodePath.indicesPath)) {
+                            // have to scan all index uuid folders to resolve from index name
+                            try (DirectoryStream<Path> stream = Files.newDirectoryStream(nodePath.indicesPath)) {
+                                for (Path file : stream) {
+                                    if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) {
+                                        continue;
+                                    }
+
+                                    final IndexMetaData indexMetaData =
+                                        IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file);
+                                    if (indexMetaData == null) {
+                                        continue;
+                                    }
+                                    final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
+                                    final Index index = indexMetaData.getIndex();
+                                    if (indexName.equals(index.getName()) == false) {
+                                        continue;
+                                    }
+                                    final ShardId shId = new ShardId(index, shardId);
+
+                                    final Path shardPathLocation = nodePath.resolve(shId);
+                                    if (Files.exists(shardPathLocation) == false) {
+                                        continue;
+                                    }
+                                    final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings,
+                                        new Path[]{shardPathLocation}, possibleLockId, nodePath.path);
+                                    if (shardPath != null) {
+                                        consumer.accept(shardPath);
+                                        return;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            } catch (LockObtainFailedException lofe) {
+                throw new ElasticsearchException("Failed to lock node's directory [" + lofe.getMessage()
+                    + "], is Elasticsearch still running?");
+            }
+        }
+        throw new ElasticsearchException("Unable to resolve shard path for index [" + indexName + "] and shard id [" + shardId + "]");
+    }
+
+    public static boolean isCorruptMarkerFileIsPresent(final Directory directory) throws IOException {
+        boolean found = false;
+
+        final String[] files = directory.listAll();
+        for (String file : files) {
+            if (file.startsWith(Store.CORRUPTED)) {
+                found = true;
+                break;
+            }
+        }
+
+        return found;
+    }
+
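+    // Note: a corruption marker is a file whose name starts with Store.CORRUPTED
+    // (conventionally a "corrupted_" prefix) that is left in the shard's index directory
+    // when corruption is detected; the check above and the cleanup below key off that prefix.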
+    protected void dropCorruptMarkerFiles(Terminal terminal, Path path, Directory directory, boolean clean) throws IOException {
+        if (clean) {
+            confirm("This shard has been marked as corrupted but no corruption can now be detected.\n"
+                + "This may indicate an intermittent hardware problem. The corruption marker can be \n"
+                + "removed, but there is a risk that data has been undetectably lost.\n\n"
+                + "Do you accept the risk of losing documents and want to proceed with removing the corruption marker?",
+                terminal);
+        }
+        String[] files = directory.listAll();
+        for (String file : files) {
+            if (file.startsWith(Store.CORRUPTED)) {
+                directory.deleteFile(file);
+
+                terminal.println("Deleted corrupt marker " + file + " from " + path);
+            }
+        }
+    }
+
+    private static void loseDataDetailsBanner(Terminal terminal, Tuple<CleanStatus, String> cleanStatus) {
+        if (cleanStatus.v2() != null) {
+            terminal.println("");
+            terminal.println(" " + cleanStatus.v2());
+            terminal.println("");
+        }
+    }
+
+    private static void confirm(String msg, Terminal terminal) {
+        terminal.println(msg);
+        String text = terminal.readText("Confirm [y/N] ");
+        if (text.equalsIgnoreCase("y") == false) {
+            throw new ElasticsearchException("aborted by user");
+        }
+    }
+
+    private void warnAboutESShouldBeStopped(Terminal terminal) {
+        terminal.println("-----------------------------------------------------------------------");
+        terminal.println("");
+        terminal.println(" WARNING: Elasticsearch MUST be stopped before running this tool.");
+        terminal.println("");
+        // that's only for 6.x branch for bwc with elasticsearch-translog
+        if (removeCorruptedLuceneSegmentsAction == null) {
+            terminal.println(" This tool is deprecated and will be completely removed in 7.0.");
+            terminal.println(" It is replaced by the elasticsearch-shard tool. ");
+            terminal.println("");
+        }
+        terminal.println(" Please make a complete backup of your index before using this tool.");
+        terminal.println("");
+        terminal.println("-----------------------------------------------------------------------");
+    }
+
+    // Visible for testing
+    @Override
+    public void execute(Terminal terminal, OptionSet options, Environment environment) throws Exception {
+        warnAboutESShouldBeStopped(terminal);
+
+        findAndProcessShardPath(options, environment, shardPath -> {
+            final Path indexPath = shardPath.resolveIndex();
+            final Path translogPath = shardPath.resolveTranslog();
+            final Path nodePath = getNodePath(shardPath);
+            if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
+                throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory");
+            }
+
+            final PrintWriter writer = terminal.getWriter();
+            final PrintStream printStream = new PrintStream(new OutputStream() {
+                @Override
+                public void write(int b) {
+                    writer.write(b);
+                }
+            }, false, "UTF-8");
+            final boolean verbose = terminal.isPrintable(Terminal.Verbosity.VERBOSE);
+
+            final Directory indexDirectory = getDirectory(indexPath);
+
+            final Tuple<CleanStatus, String> indexCleanStatus;
+            final Tuple<CleanStatus, String> translogCleanStatus;
+            try (Directory indexDir = indexDirectory) {
+                // keep the index lock to block any runs of older versions of this tool
+                try (Lock writeIndexLock = indexDir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
+                    ////////// Index
+                    // that's only for 6.x branch for bwc with elasticsearch-translog
+                    if (removeCorruptedLuceneSegmentsAction != null) {
+                        terminal.println("");
+                        terminal.println("Opening Lucene index at " + indexPath);
+                        terminal.println("");
+                        try {
+                            indexCleanStatus = removeCorruptedLuceneSegmentsAction.getCleanStatus(shardPath, indexDir,
+                                writeIndexLock, printStream, verbose);
+                        } catch (Exception e) {
+                            terminal.println(e.getMessage());
+                            throw e;
+                        }
+
+                        terminal.println("");
+                        terminal.println(" >> Lucene index is " + indexCleanStatus.v1().getMessage() + " at " + indexPath);
+                        terminal.println("");
+                    } else {
+                        indexCleanStatus = Tuple.tuple(CleanStatus.CLEAN, null);
+                    }
+
+                    ////////// Translog
+                    // the translog relies on data stored in an index commit, so the index must not be unrecoverable
+                    // before we can truncate the translog
+                    if (indexCleanStatus.v1() != CleanStatus.UNRECOVERABLE) {
+                        terminal.println("");
+                        terminal.println("Opening translog at " + translogPath);
+                        terminal.println("");
+                        try {
+                            translogCleanStatus = truncateTranslogAction.getCleanStatus(shardPath, indexDir);
+                        } catch (Exception e) {
+                            terminal.println(e.getMessage());
+                            throw e;
+                        }
+
+                        terminal.println("");
+                        terminal.println(" >> Translog is " + translogCleanStatus.v1().getMessage() + " at " + translogPath);
+                        terminal.println("");
+                    } else {
+                        translogCleanStatus = Tuple.tuple(CleanStatus.UNRECOVERABLE, null);
+                    }
+
+                    ////////// Drop corrupted data
+                    final CleanStatus indexStatus = indexCleanStatus.v1();
+                    final CleanStatus translogStatus = translogCleanStatus.v1();
+
+                    if (indexStatus == CleanStatus.CLEAN && translogStatus == CleanStatus.CLEAN) {
+                        throw new ElasticsearchException("Shard does not seem to be corrupted at " + shardPath.getDataPath());
+                    }
+
+                    if (indexStatus == CleanStatus.UNRECOVERABLE) {
+                        if (indexCleanStatus.v2() != null) {
+                            terminal.println("Details: " + indexCleanStatus.v2());
+                        }
+
+                        terminal.println("You can allocate a new, empty primary shard with the following command:");
+
+                        printRerouteCommand(shardPath, terminal, false);
+
+                        throw new ElasticsearchException("Index is unrecoverable");
+                    }
+
+                    terminal.println("-----------------------------------------------------------------------");
+                    if (indexStatus != CleanStatus.CLEAN) {
+                        loseDataDetailsBanner(terminal, indexCleanStatus);
+                    }
+                    if (translogStatus != CleanStatus.CLEAN) {
+                        loseDataDetailsBanner(terminal, translogCleanStatus);
+                    }
+                    terminal.println(" WARNING: YOU MAY LOSE DATA.");
+                    terminal.println("-----------------------------------------------------------------------");
+
+                    confirm("Continue and remove corrupted data from the shard?", terminal);
+
+                    if (indexStatus != CleanStatus.CLEAN) {
+                        removeCorruptedLuceneSegmentsAction.execute(terminal, shardPath, indexDir,
+                            writeIndexLock, printStream, verbose);
+                    }
+
+                    if (translogStatus != CleanStatus.CLEAN) {
+                        truncateTranslogAction.execute(terminal, shardPath, indexDir);
+                    }
+                } catch (LockObtainFailedException lofe) {
+                    final String msg = "Failed to lock shard's directory at [" + indexPath + "], is Elasticsearch still running?";
+                    terminal.println(msg);
+                    throw new ElasticsearchException(msg);
+                }
+
+                final CleanStatus indexStatus = indexCleanStatus.v1();
+                final CleanStatus translogStatus = translogCleanStatus.v1();
+
+                // newHistoryCommit obtains its own lock
+                addNewHistoryCommit(indexDir, terminal, translogStatus != CleanStatus.CLEAN);
+                newAllocationId(environment, shardPath, terminal);
+                if (indexStatus != CleanStatus.CLEAN) {
+                    dropCorruptMarkerFiles(terminal, indexPath, indexDir, indexStatus == CleanStatus.CLEAN_WITH_CORRUPTED_MARKER);
+                }
+            }
+        });
+    }
+
+    private Directory getDirectory(Path indexPath) {
+        Directory directory;
+        try {
+            directory = FSDirectory.open(indexPath, NativeFSLockFactory.INSTANCE);
+        } catch (Throwable t) {
+            throw new ElasticsearchException("ERROR: could not open directory \"" + indexPath + "\"; exiting", t);
+        }
+        return directory;
+    }
+
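+    // Why a new history commit: once operations have been thrown away, the shard's
+    // operation history is no longer complete, so a fresh history UUID is committed below;
+    // recoveries that compare histories can then no longer assume an intact operation
+    // history for this shard.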
+    protected void addNewHistoryCommit(Directory indexDirectory, Terminal terminal, boolean updateLocalCheckpoint) throws IOException {
+        final String historyUUID = UUIDs.randomBase64UUID();
+
+        terminal.println("Marking index with the new history uuid: " + historyUUID);
+        // commit the new history id
+        final IndexWriterConfig iwc = new IndexWriterConfig(null)
+            // we don't want merges to happen here - we call maybe merge on the engine
+            // later once we started it up otherwise we would need to wait for it here
+            // we also don't specify a codec here and merges should use the engines for this index
+            .setCommitOnClose(false)
+            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
+            .setMergePolicy(NoMergePolicy.INSTANCE)
+            .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
+        // IndexWriter acquires the directory lock on its own
+        try (IndexWriter indexWriter = new IndexWriter(indexDirectory, iwc)) {
+            final Map<String, String> userData = new HashMap<>();
+            indexWriter.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue()));
+
+            if (updateLocalCheckpoint) {
+                // In order to have a safe commit invariant, we have to assign the global checkpoint to the max_seqno of the last commit.
+                // We can only safely do it because we will generate a new history uuid for this shard.
+                final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet());
+                // Also advances the local checkpoint of the last commit to its max_seqno.
+                userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(commitInfo.maxSeqNo));
+            }
+
+            // commit the new history id
+            userData.put(Engine.HISTORY_UUID_KEY, historyUUID);
+
+            indexWriter.setLiveCommitData(userData.entrySet());
+            indexWriter.commit();
+        }
+    }
+
+    protected void newAllocationId(Environment environment, ShardPath shardPath, Terminal terminal) throws IOException {
+        final Path shardStatePath = shardPath.getShardStatePath();
+        final ShardStateMetaData shardStateMetaData =
+            ShardStateMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardStatePath);
+
+        if (shardStateMetaData == null) {
+            throw new ElasticsearchException("No shard state meta data at " + shardStatePath);
+        }
+
+        final AllocationId newAllocationId = AllocationId.newInitializing();
+
+        terminal.println("Changing allocation id " + shardStateMetaData.allocationId.getId()
+            + " to " + newAllocationId.getId());
+
+        final ShardStateMetaData newShardStateMetaData =
+            new ShardStateMetaData(shardStateMetaData.primary, shardStateMetaData.indexUUID, newAllocationId);
+
+        ShardStateMetaData.FORMAT.write(newShardStateMetaData, shardStatePath);
+
+        terminal.println("");
+        terminal.println("You should run the following command to allocate this shard:");
+
+        printRerouteCommand(shardPath, terminal, true);
+    }
+
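+    // The helper below prints a ready-to-run reroute request. The output looks roughly like
+    // this (index and node values are examples):
+    //
+    //   POST /_cluster/reroute
+    //   {
+    //     "commands" : [
+    //       {
+    //         "allocate_stale_primary" : {
+    //           "index" : "my_index",
+    //           "shard" : 0,
+    //           "node" : "node-id",
+    //           "accept_data_loss" : false
+    //         }
+    //       }
+    //     ]
+    //   }
+    //
+    // accept_data_loss is deliberately printed as false; the operator has to flip it to true.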
+    private void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean allocateStale) throws IOException {
+        final IndexMetaData indexMetaData =
+            IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry,
+                shardPath.getDataPath().getParent());
+
+        final Path nodePath = getNodePath(shardPath);
+        final NodeMetaData nodeMetaData =
+            NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodePath);
+
+        if (nodeMetaData == null) {
+            throw new ElasticsearchException("No node meta data at " + nodePath);
+        }
+
+        final String nodeId = nodeMetaData.nodeId();
+        final String index = indexMetaData.getIndex().getName();
+        final int id = shardPath.getShardId().id();
+        final AllocationCommands commands = new AllocationCommands(
+            allocateStale
+                ? new AllocateStalePrimaryAllocationCommand(index, id, nodeId, false)
+                : new AllocateEmptyPrimaryAllocationCommand(index, id, nodeId, false));
+
+        terminal.println("");
+        terminal.println("POST /_cluster/reroute\n" + Strings.toString(commands, true, true));
+        terminal.println("");
+        terminal.println("You must accept the possibility of data loss by changing the parameter `accept_data_loss` to `true`.");
+        terminal.println("");
+    }
+
+    private Path getNodePath(ShardPath shardPath) {
+        final Path nodePath = shardPath.getDataPath().getParent().getParent().getParent();
+        if (Files.exists(nodePath) == false || Files.exists(nodePath.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) {
+            throw new ElasticsearchException("Unable to resolve node path for " + shardPath);
+        }
+        return nodePath;
+    }
+
+    public enum CleanStatus {
+        CLEAN("clean"),
+        CLEAN_WITH_CORRUPTED_MARKER("marked corrupted, but no corruption detected"),
+        CORRUPTED("corrupted"),
+        UNRECOVERABLE("corrupted and unrecoverable");
+
+        private final String msg;
+
+        CleanStatus(String msg) {
+            this.msg = msg;
+        }
+
+        public String getMessage() {
+            return msg;
+        }
+    }
+
+}
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
index 7cb719e41f433..ffce215646443 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
@@ -112,16 +112,31 @@ public boolean isCustomDataPath() {
      * Note: this method resolves custom data locations for the shard.
      */
     public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException {
-        final String indexUUID = indexSettings.getUUID();
         final Path[] paths = env.availableShardPaths(shardId);
+        final int nodeLockId = env.getNodeLockId();
+        final Path sharedDataPath = env.sharedDataPath();
+        return loadShardPath(logger, shardId, indexSettings, paths, nodeLockId, sharedDataPath);
+    }
+
+    /**
+     * This method walks through the nodes shard paths to find the data and state path for the given shard. If multiple
+     * directories with a valid shard state exist the one with the highest version will be used.
+     * Note: this method resolves custom data locations for the shard.
+     */
+    public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSettings indexSettings, Path[] availableShardPaths,
+                                          int nodeLockId, Path sharedDataPath) throws IOException {
+        final String indexUUID = indexSettings.getUUID();
         Path loadedPath = null;
-        for (Path path : paths) {
+        for (Path path : availableShardPaths) {
             // EMPTY is safe here because we never call namedObject
             ShardStateMetaData load = ShardStateMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path);
             if (load != null) {
                 if (load.indexUUID.equals(indexUUID) == false && IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID) == false) {
-                    logger.warn("{} found shard on path: [{}] with a different index UUID - this shard seems to be leftover from a different index with the same name. Remove the leftover shard in order to reuse the path with the current index", shardId, path);
-                    throw new IllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " expected: " + indexUUID + " on shard path: " + path);
+                    logger.warn("{} found shard on path: [{}] with a different index UUID - this "
+                        + "shard seems to be leftover from a different index with the same name. "
+                        + "Remove the leftover shard in order to reuse the path with the current index", shardId, path);
+                    throw new IllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID
+                        + " expected: " + indexUUID + " on shard path: " + path);
                 }
                 if (loadedPath == null) {
                     loadedPath = path;
@@ -137,7 +152,7 @@ public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardI
         final Path dataPath;
         final Path statePath = loadedPath;
         if (indexSettings.hasCustomDataPath()) {
-            dataPath = env.resolveCustomLocation(indexSettings, shardId);
+            dataPath = NodeEnvironment.resolveCustomLocation(indexSettings, shardId, sharedDataPath, nodeLockId);
         } else {
             dataPath = statePath;
         }
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java b/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java
new file mode 100644
index 0000000000000..62693d2b60b78
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.cli.LoggingAwareMultiCommand;
+import org.elasticsearch.cli.Terminal;
+
+/**
+ * Class encapsulating and dispatching commands from the {@code elasticsearch-shard} command line tool
+ */
+public class ShardToolCli extends LoggingAwareMultiCommand {
+
+    private ShardToolCli() {
+        super("A CLI tool to remove corrupted parts of unrecoverable shards");
+        subcommands.put("remove-corrupted-data", new RemoveCorruptedShardDataCommand());
+    }
+
+    public static void main(String[] args) throws Exception {
+        exit(new ShardToolCli().main(args, Terminal.DEFAULT));
+    }
+
+}
+
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
index 20aadf21bcb48..d80a6729d30bc 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
@@ -144,7 +144,6 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil
         final long primaryTerm;
         if (version == VERSION_PRIMARY_TERM) {
             primaryTerm = in.readLong();
-            assert primaryTerm >= 0 : "Primary term must be non-negative [" + primaryTerm + "]; translog path [" + path + "]";
         } else {
             assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]";
             primaryTerm = UNKNOWN_PRIMARY_TERM;
@@ -153,6 +152,8 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil
         if (version >= VERSION_PRIMARY_TERM) {
             Translog.verifyChecksum(in);
         }
+        assert primaryTerm >= 0 : "Primary term must be non-negative [" + primaryTerm + "]; translog path [" + path + "]";
+
         final int headerSizeInBytes = headerSizeInBytes(version, uuid.length);
         assert channel.position() == headerSizeInBytes :
             "Header is not fully read; header size [" + headerSizeInBytes + "], position [" + channel.position() + "]";
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java
index f7d830a32ec1b..a8a8d735f9a0c 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java
@@ -21,15 +21,18 @@
 
 import org.elasticsearch.cli.LoggingAwareMultiCommand;
 import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.index.shard.RemoveCorruptedShardDataCommand;
 
 /**
  * Class encapsulating and dispatching commands from the {@code elasticsearch-translog} command line tool
  */
+@Deprecated
 public class TranslogToolCli extends LoggingAwareMultiCommand {
 
     private TranslogToolCli() {
+        // that's only for 6.x branch for bwc with elasticsearch-translog
         super("A CLI tool for various Elasticsearch translog actions");
-        subcommands.put("truncate", new TruncateTranslogCommand());
+        subcommands.put("truncate", new RemoveCorruptedShardDataCommand(true));
     }
 
     public static void main(String[] args) throws Exception {
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java
new file mode 100644
index 0000000000000..0b9c365509685
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.core.internal.io.IOUtils;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.seqno.SequenceNumbers;
+import org.elasticsearch.index.shard.RemoveCorruptedShardDataCommand;
+import org.elasticsearch.index.shard.ShardPath;
+
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+public class TruncateTranslogAction {
+
+    protected static final Logger logger = Loggers.getLogger(TruncateTranslogAction.class);
+    private final NamedXContentRegistry namedXContentRegistry;
+
+    public TruncateTranslogAction(NamedXContentRegistry namedXContentRegistry) {
+        this.namedXContentRegistry = namedXContentRegistry;
+    }
+
+    public Tuple<RemoveCorruptedShardDataCommand.CleanStatus, String> getCleanStatus(ShardPath shardPath,
+                                                                                     Directory indexDirectory) throws IOException {
+        final Path indexPath = shardPath.resolveIndex();
+        final Path translogPath = shardPath.resolveTranslog();
+        final List<IndexCommit> commits;
+        try {
+            commits = DirectoryReader.listCommits(indexDirectory);
+        } catch (IndexNotFoundException infe) {
+            throw new ElasticsearchException("unable to find a valid shard at [" + indexPath + "]", infe);
+        }
+
+        // Retrieve the generation and UUID from the existing data
+        final Map<String, String> commitData = new HashMap<>(commits.get(commits.size() - 1).getUserData());
+        final String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
+
+        if (translogUUID == null) {
+            throw new ElasticsearchException("shard must have a valid translog UUID but got: [null]");
+        }
+
+        final boolean clean = isTranslogClean(shardPath, translogUUID);
+
+        if (clean) {
+            return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null);
+        }
+
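+        // "clean" above means the whole translog could be replayed without hitting a
+        // TranslogCorruptedException (see isTranslogClean below), so truncation is only
+        // offered for translogs that genuinely cannot be read back.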
+        // List the translog files that would be removed, for the details message
+        Set<Path> translogFiles;
+        try {
+            translogFiles = filesInDirectory(translogPath);
+        } catch (IOException e) {
+            throw new ElasticsearchException("failed to find existing translog files", e);
+        }
+        final String details = deletingFilesDetails(translogPath, translogFiles);
+
+        return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CORRUPTED, details);
+    }
+
+    public void execute(Terminal terminal, ShardPath shardPath, Directory indexDirectory) throws IOException {
+        final Path indexPath = shardPath.resolveIndex();
+        final Path translogPath = shardPath.resolveTranslog();
+
+        final String historyUUID = UUIDs.randomBase64UUID();
+        final Map<String, String> commitData;
+        // the caller is expected to hold the index write lock for the duration of the tool run
+        Set<Path> translogFiles;
+        try {
+            terminal.println("Checking existing translog files");
+            translogFiles = filesInDirectory(translogPath);
+        } catch (IOException e) {
+            terminal.println("encountered IOException while listing directory, aborting...");
+            throw new ElasticsearchException("failed to find existing translog files", e);
+        }
+
+        List<IndexCommit> commits;
+        try {
+            terminal.println("Reading translog UUID information from Lucene commit from shard at [" + indexPath + "]");
+            commits = DirectoryReader.listCommits(indexDirectory);
+        } catch (IndexNotFoundException infe) {
+            throw new ElasticsearchException("unable to find a valid shard at [" + indexPath + "]", infe);
+        }
+
+        // Retrieve the generation and UUID from the existing data
+        commitData = commits.get(commits.size() - 1).getUserData();
+        final String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
+        final String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
+        if (translogGeneration == null || translogUUID == null) {
+            throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
+                translogGeneration, translogUUID);
+        }
+
+        final long globalCheckpoint = commitData.containsKey(SequenceNumbers.MAX_SEQ_NO)
+            ? Long.parseLong(commitData.get(SequenceNumbers.MAX_SEQ_NO))
+            : SequenceNumbers.UNASSIGNED_SEQ_NO;
+
+        terminal.println("Translog Generation: " + translogGeneration);
+        terminal.println("Translog UUID      : " + translogUUID);
+        terminal.println("History UUID       : " + historyUUID);
+
+        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
+        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
+        Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX
+            + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
+        Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX
+            + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
+
+        // Write empty checkpoint and translog to empty files
+        long gen = Long.parseLong(translogGeneration);
+        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
+        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen, globalCheckpoint);
+
+        terminal.println("Removing existing translog files");
+        IOUtils.rm(translogFiles.toArray(new Path[]{}));
+
+        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
+        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
+        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
+        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);
+
+        // Fsync the translog directory after rename
+        IOUtils.fsync(translogPath, true);
+    }
+
+    private boolean isTranslogClean(ShardPath shardPath, String translogUUID) throws IOException {
+        // check the translog itself rather than relying on a corruption marker file:
+        // open it and replay every operation from a snapshot
+        boolean clean = true;
+        try {
+            final Path translogPath = shardPath.resolveTranslog();
+            final long translogGlobalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID);
+            final IndexMetaData indexMetaData =
+                IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardPath.getDataPath().getParent());
+            final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
+            final TranslogConfig translogConfig = new TranslogConfig(shardPath.getShardId(), translogPath,
+                indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
+            long primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardPath.getShardId().id());
+            final TranslogDeletionPolicy translogDeletionPolicy =
+                new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(),
+                    indexSettings.getTranslogRetentionAge().getMillis());
+            try (Translog translog = new Translog(translogConfig, translogUUID,
+                translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm);
+                 Translog.Snapshot snapshot = translog.newSnapshot()) {
+                while (snapshot.next() != null) {
+                    // just iterate over the snapshot
+                }
+            }
+        } catch (TranslogCorruptedException e) {
+            clean = false;
+        }
+        return clean;
+    }
+
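+    // Note on the checkpoint written below: the translog generation is passed to
+    // emptyTranslogCheckpoint twice - once as the current generation and once (last
+    // argument) as the minimum referenced generation - so the new checkpoint points only
+    // at the freshly written empty translog file.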
+    /** Write a checkpoint file to the given location with the given generation */
+    static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration, long globalCheckpoint) throws IOException {
+        Checkpoint emptyCheckpoint = Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration,
+            globalCheckpoint, translogGeneration);
+        Checkpoint.write(FileChannel::open, filename, emptyCheckpoint,
+            StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
+        // fsync with metadata to make sure the new checkpoint is durable on disk
+        IOUtils.fsync(filename, false);
+    }
+
+    /**
+     * Write a translog containing the given translog UUID to the given location. Returns the number of bytes written.
+     */
+    private static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException {
+        try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
+            TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM);
+            header.write(fc);
+            return header.sizeInBytes();
+        }
+    }
+
+    /** Build a listing of the translog files that are going to be deleted */
+    private String deletingFilesDetails(Path translogPath, Set<Path> files) {
+        StringBuilder builder = new StringBuilder();
+
+        builder
+            .append("Documents inside of translog files will be lost.\n")
+            .append(" The following files will be DELETED at ")
+            .append(translogPath)
+            .append("\n\n");
+        for (Iterator<Path> it = files.iterator(); it.hasNext(); ) {
+            builder.append(" --> ").append(it.next().getFileName());
+            if (it.hasNext()) {
+                builder.append("\n");
+            }
+        }
+        return builder.toString();
+    }
+
+    /** Return a Set of all files in a given directory */
+    public static Set<Path> filesInDirectory(Path directory) throws IOException {
+        Set<Path> files = new TreeSet<>();
+        try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory)) {
+            for (Path file : stream) {
+                files.add(file);
+            }
+        }
+        return files;
+    }
+
+}
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
deleted file mode 100644
index a90f8af0af42c..0000000000000
--- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.translog;
-
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-import joptsimple.OptionSpec;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.NoMergePolicy;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.NativeFSLockFactory;
-import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.core.internal.io.IOUtils;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cli.EnvironmentAwareCommand;
-import org.elasticsearch.cli.Terminal;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.IndexNotFoundException;
-import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.seqno.SequenceNumbers;
-
-import java.io.IOException;
-import java.nio.channels.FileChannel;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
-import java.nio.file.StandardOpenOption;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-public class TruncateTranslogCommand extends EnvironmentAwareCommand {
-
-    private final OptionSpec<String> translogFolder;
-    private final OptionSpec<Void> batchMode;
-
-    public TruncateTranslogCommand() {
-        super("Truncates a translog to create a new, empty translog");
-        this.translogFolder = parser.acceptsAll(Arrays.asList("d", "dir"),
-            "Translog Directory location on disk")
-            .withRequiredArg()
-            .required();
-        this.batchMode = parser.acceptsAll(Arrays.asList("b", "batch"),
-            "Enable batch mode explicitly, automatic confirmation of warnings");
-    }
-
-    // Visible for testing
-    public OptionParser getParser() {
-        return this.parser;
-    }
-
-    @Override
-    protected void printAdditionalHelp(Terminal terminal) {
-        terminal.println("This tool truncates the translog and translog");
-        terminal.println("checkpoint files to create a new translog");
-    }
-
-    @SuppressForbidden(reason = "Necessary to use the path passed in")
-    private Path getTranslogPath(OptionSet options) {
-        return PathUtils.get(translogFolder.value(options), "", "");
-    }
-
-    @Override
-    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
-        boolean batch = options.has(batchMode);
-
-        Path translogPath = getTranslogPath(options);
-        Path idxLocation = translogPath.getParent().resolve("index");
-
-        if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
-            throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory");
-        }
-
-        if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
-            throw new ElasticsearchException("unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
-        }
-        try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE)) {
-            final String historyUUID = UUIDs.randomBase64UUID();
-            final Map<String, String> commitData;
-            // Hold the lock open for the duration of the tool running
-            try (Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
-                Set<Path> translogFiles;
-                try {
-                    terminal.println("Checking existing translog files");
-                    translogFiles = filesInDirectory(translogPath);
-                } catch (IOException e) {
-                    terminal.println("encountered IOException while listing directory, aborting...");
-                    throw new ElasticsearchException("failed to find existing translog files", e);
-                }
-
-                // Warn about ES being stopped and files being deleted
-                warnAboutDeletingFiles(terminal, translogFiles, batch);
-
-                List<IndexCommit> commits;
-                try {
-                    terminal.println("Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]");
-                    commits = DirectoryReader.listCommits(dir);
-                } catch (IndexNotFoundException infe) {
-                    throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
-                }
-
-                // Retrieve the generation and UUID from the existing data
-                commitData = new HashMap<>(commits.get(commits.size() - 1).getUserData());
-                String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
-                String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
-                final long globalCheckpoint;
-                // In order to have a safe commit invariant, we have to assign the global checkpoint to the max_seqno of the last commit.
-                // We can only safely do it because we will generate a new history uuid this shard.
-                if (commitData.containsKey(SequenceNumbers.MAX_SEQ_NO)) {
-                    globalCheckpoint = Long.parseLong(commitData.get(SequenceNumbers.MAX_SEQ_NO));
-                    // Also advances the local checkpoint of the last commit to its max_seqno.
-                    commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(globalCheckpoint));
-                } else {
-                    globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;
-                }
-                if (translogGeneration == null || translogUUID == null) {
-                    throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
-                        translogGeneration, translogUUID);
-                }
-                terminal.println("Translog Generation: " + translogGeneration);
-                terminal.println("Translog UUID      : " + translogUUID);
-                terminal.println("History UUID       : " + historyUUID);
-
-                Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
-                Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
-                Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX +
-                    translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
-                Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX +
-                    translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
-
-                // Write empty checkpoint and translog to empty files
-                long gen = Long.parseLong(translogGeneration);
-                int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
-                writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen, globalCheckpoint);
-
-                terminal.println("Removing existing translog files");
-                IOUtils.rm(translogFiles.toArray(new Path[]{}));
-
-                terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
-                Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
-                terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
-                Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);
-
-                // Fsync the translog directory after rename
-                IOUtils.fsync(translogPath, true);
-            }
-
-            terminal.println("Marking index with the new history uuid");
-            // commit the new histroy id
-            IndexWriterConfig iwc = new IndexWriterConfig(null)
-                .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
-                .setCommitOnClose(false)
-                // we don't want merges to happen here - we call maybe merge on the engine
-                // later once we stared it up otherwise we would need to wait for it here
-                // we also don't specify a codec here and merges should use the engines for this index
-                .setMergePolicy(NoMergePolicy.INSTANCE)
-                .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
-            try (IndexWriter writer = new IndexWriter(dir, iwc)) {
-                Map<String, String> newCommitData = new HashMap<>(commitData);
-                newCommitData.put(Engine.HISTORY_UUID_KEY, historyUUID);
-                writer.setLiveCommitData(newCommitData.entrySet());
-                writer.commit();
-            }
-        } catch (LockObtainFailedException lofe) {
-            throw new ElasticsearchException("Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
-        }
-
-        terminal.println("Done.");
-    }
-
-    /** Write a checkpoint file to the given location with the given generation */
-    static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration, long globalCheckpoint) throws IOException {
-        Checkpoint emptyCheckpoint = Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration,
-            globalCheckpoint, translogGeneration);
-        Checkpoint.write(FileChannel::open, filename, emptyCheckpoint,
-            StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
-        // fsync with metadata here to make sure.
-        IOUtils.fsync(filename, false);
-    }
-
-    /**
-     * Write a translog containing the given translog UUID to the given location. Returns the number of bytes written.
-     */
-    public static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException {
-        try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
-            TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM);
-            header.write(fc);
-            return header.sizeInBytes();
-        }
-    }
-
-    /** Show a warning about deleting files, asking for a confirmation if {@code batchMode} is false */
-    public static void warnAboutDeletingFiles(Terminal terminal, Set<Path> files, boolean batchMode) {
-        terminal.println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
-        terminal.println("! WARNING: Elasticsearch MUST be stopped before running this tool !");
-        terminal.println("! !");
-        terminal.println("! WARNING: Documents inside of translog files will be lost !");
-        terminal.println("! !");
-        terminal.println("! WARNING: The following files will be DELETED! !");
-        terminal.println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
-        for (Path file : files) {
-            terminal.println("--> " + file);
-        }
-        terminal.println("");
-        if (batchMode == false) {
-            String text = terminal.readText("Continue and DELETE files? [y/N] ");
-            if (!text.equalsIgnoreCase("y")) {
-                throw new ElasticsearchException("aborted by user");
-            }
-        }
-    }
-
-    /** Return a Set of all files in a given directory */
-    public static Set<Path> filesInDirectory(Path directory) throws IOException {
-        Set<Path> files = new HashSet<>();
-        try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory)) {
-            for (Path file : stream) {
-                files.add(file);
-            }
-        }
-        return files;
-    }
-
-}
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 9a5df39a970a9..b74b5343a82a1 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
@@ -2641,7 +2640,8 @@ public void testIndexCheckOnStartup() throws Exception {
 
         final ShardPath shardPath = indexShard.shardPath();
 
-        final Path indexPath = corruptIndexFile(shardPath);
+        final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME);
+        CorruptionUtils.corruptIndex(random(), indexPath, false);
 
         final AtomicInteger corruptedMarkerCount = new AtomicInteger();
         final SimpleFileVisitor<Path> corruptedVisitor = new SimpleFileVisitor<Path>() {
@@ -2750,22 +2750,6 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO
         assertThat("store still has a single corrupt marker", corruptedMarkerCount.get(), equalTo(1));
     }
 
-    private Path corruptIndexFile(ShardPath shardPath) throws IOException {
-        final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME);
-        final Path[] filesToCorrupt =
-            Files.walk(indexPath)
-                .filter(p -> {
-                    final String name = p.getFileName().toString();
-                    return Files.isRegularFile(p)
-                        && name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS
-                        && IndexWriter.WRITE_LOCK_NAME.equals(name) == false
-                        && name.startsWith("segments_") == false && name.endsWith(".si") == false;
-                })
-                .toArray(Path[]::new);
-        CorruptionUtils.corruptFile(random(), filesToCorrupt);
-        return indexPath;
-    }
-
     /**
      * Simulates a scenario that happens when we are async fetching snapshot metadata from GatewayService
      * and checking index concurrently. This should always be possible without any exception.
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java
new file mode 100644
index 0000000000000..dc3be31734d5c
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java
@@ -0,0 +1,652 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.NativeFSLockFactory;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.cli.MockTerminal;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
+import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.MergePolicyConfig;
+import org.elasticsearch.index.MockEngineFactoryPlugin;
+import org.elasticsearch.index.seqno.SeqNoStats;
+import org.elasticsearch.index.translog.TestTranslog;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.monitor.fs.FsInfo;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.CorruptionUtils;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.engine.MockEngineSupport;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.startsWith;
+
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0)
+public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(MockTransportService.TestPlugin.class, MockEngineFactoryPlugin.class, InternalSettingsPlugin.class);
+    }
+
+    public void testCorruptIndex() throws Exception {
+        final String node = internalCluster().startNode();
+
+        final String indexName = "index42";
+        assertAcked(prepareCreate(indexName).setSettings(Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1")
+            .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true)
+            .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum")
+        ));
+
+        // index some docs in several segments
+        int numDocs = 0;
+        for (int k = 0, attempts = randomIntBetween(5, 10); k < attempts; k++) {
+            final int numExtraDocs = between(10, 100);
+            IndexRequestBuilder[] builders = new IndexRequestBuilder[numExtraDocs];
+            for (int i = 0; i < builders.length; i++) {
+                builders[i] = client().prepareIndex(indexName, "type").setSource("foo", "bar");
+            }
+
+            numDocs += numExtraDocs;
+
+            indexRandom(false, false, false, Arrays.asList(builders));
+            flush(indexName);
+        }
+
+        logger.info("--> indexed {} docs", numDocs);
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final MockTerminal terminal = new MockTerminal();
+        final OptionParser parser = command.getParser();
+
+        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0");
+
+        // Try running it before the node is stopped (and the shard is closed)
+        try {
+            command.execute(terminal, options, environment);
+            fail("expected the command to fail as the node is locked");
+        } catch (Exception e) {
+            assertThat(e.getMessage(),
+                allOf(containsString("Failed to lock node's directory"),
+                    containsString("is Elasticsearch still running?")));
+        }
+
+        final Set<Path> indexDirs = getDirs(indexName, ShardPath.INDEX_FOLDER_NAME);
+        assertThat(indexDirs, hasSize(1));
+
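+        // The tool can only run while the node holding the shard is shut down, so the test
+        // repeatedly uses InternalTestCluster.RestartCallback below: onNodeStopped runs while
+        // the node is down, which is when files are corrupted and the command is invoked.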
+        internalCluster().restartNode(node, new InternalTestCluster.RestartCallback() {
+            @Override
+            public Settings onNodeStopped(String nodeName) throws Exception {
+                // Try running it before the shard is corrupted, it should flip out because there is no corruption file marker
+                try {
+                    command.execute(terminal, options, environment);
+                    fail("expected the command to fail as there is no corruption file marker");
+                } catch (Exception e) {
+                    assertThat(e.getMessage(), startsWith("Shard does not seem to be corrupted at"));
+                }
+
+                CorruptionUtils.corruptIndex(random(), indexDirs.iterator().next(), false);
+                return super.onNodeStopped(nodeName);
+            }
+        });
+
+        // shard should be failed due to a corrupted index
+        assertBusy(() -> {
+            final ClusterAllocationExplanation explanation =
+                client().admin().cluster().prepareAllocationExplain()
+                    .setIndex(indexName).setShard(0).setPrimary(true)
+                    .get().getExplanation();
+
+            final ShardAllocationDecision shardAllocationDecision = explanation.getShardAllocationDecision();
+            assertThat(shardAllocationDecision.isDecisionTaken(), equalTo(true));
+            assertThat(shardAllocationDecision.getAllocateDecision().getAllocationDecision(),
+                equalTo(AllocationDecision.NO_VALID_SHARD_COPY));
+        });
+
+        internalCluster().restartNode(node, new InternalTestCluster.RestartCallback() {
+            @Override
+            public Settings onNodeStopped(String nodeName) throws Exception {
+                terminal.addTextInput("y");
+                command.execute(terminal, options, environment);
+
+                return super.onNodeStopped(nodeName);
+            }
+        });
+
+        waitNoPendingTasksOnAll();
+
+        String nodeId = null;
+        final ClusterState state = client().admin().cluster().prepareState().get().getState();
+        final DiscoveryNodes nodes = state.nodes();
+        for (ObjectObjectCursor<String, DiscoveryNode> cursor : nodes.getNodes()) {
+            final String name = cursor.value.getName();
+            if (name.equals(node)) {
+                nodeId = cursor.key;
+                break;
+            }
+        }
+        assertThat(nodeId, notNullValue());
+
+        logger.info("--> output:\n{}", terminal.getOutput());
+
+        assertThat(terminal.getOutput(), containsString("allocate_stale_primary"));
+        assertThat(terminal.getOutput(), containsString("\"node\" : \"" + nodeId + "\""));
+
+        // there is only a _stale_ primary (due to the new allocation id)
+        assertBusy(() -> {
+            final ClusterAllocationExplanation explanation =
+                client().admin().cluster().prepareAllocationExplain()
+                    .setIndex(indexName).setShard(0).setPrimary(true)
+                    .get().getExplanation();
+
+            final ShardAllocationDecision shardAllocationDecision = explanation.getShardAllocationDecision();
+            assertThat(shardAllocationDecision.isDecisionTaken(), equalTo(true));
+            assertThat(shardAllocationDecision.getAllocateDecision().getAllocationDecision(),
+                equalTo(AllocationDecision.NO_VALID_SHARD_COPY));
+        });
+
+        client().admin().cluster().prepareReroute()
+            .add(new AllocateStalePrimaryAllocationCommand(indexName, 0, nodeId, true))
+            .get();
+
+        assertBusy(() -> {
+            final ClusterAllocationExplanation explanation =
+                client().admin().cluster().prepareAllocationExplain()
+                    .setIndex(indexName).setShard(0).setPrimary(true)
+                    .get().getExplanation();
+
+            assertThat(explanation.getCurrentNode(), notNullValue());
+            assertThat(explanation.getShardState(), equalTo(ShardRoutingState.STARTED));
+        });
+
+        final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?<docs>\\d+) documents will be lost.");
+        final Matcher matcher = pattern.matcher(terminal.getOutput());
+        assertThat(matcher.find(), equalTo(true));
+        final int expectedNumDocs = numDocs - Integer.parseInt(matcher.group("docs"));
+
+        ensureGreen(indexName);
+
+        assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), expectedNumDocs);
+    }
+
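+    // The next test exercises the translog-truncation path: a replica is (sometimes) flushed
+    // and then stopped so a complete copy survives elsewhere, the primary's translog is
+    // corrupted, and the tool is used to truncate it while the primary node is down.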
internalCluster().getNodeNames()[0]; + final String node2 = internalCluster().getNodeNames()[1]; + + final String indexName = "test"; + assertAcked(prepareCreate(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") + .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog + .put("index.routing.allocation.exclude._name", node2) + )); + ensureYellow(); + + assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() + .put("index.routing.allocation.exclude._name", (String)null) + )); + ensureGreen(); + + // Index some documents + int numDocsToKeep = randomIntBetween(10, 100); + logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(indexName, "type").setSource("foo", "bar"); + } + indexRandom(false, false, false, Arrays.asList(builders)); + flush(indexName); + + disableTranslogFlush(indexName); + // having no extra docs is an interesting case for seq no based recoveries - test it more often + int numDocsToTruncate = randomBoolean() ? 0 : randomIntBetween(0, 100); + logger.info("--> indexing [{}] more doc to be truncated", numDocsToTruncate); + builders = new IndexRequestBuilder[numDocsToTruncate]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(indexName, "type").setSource("foo", "bar"); + } + indexRandom(false, false, false, Arrays.asList(builders)); + Set translogDirs = getDirs(indexName, ShardPath.TRANSLOG_FOLDER_NAME); + + // that's only for 6.x branch for bwc with elasticsearch-translog + final boolean translogOnly = randomBoolean(); + final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(translogOnly); + final MockTerminal terminal = new MockTerminal(); + final OptionParser parser = command.getParser(); + + if (randomBoolean() && numDocsToTruncate > 0) { + // flush the replica, so it will have more docs than what the primary will have + Index index = resolveIndex(indexName); + IndexShard replica = internalCluster().getInstance(IndicesService.class, node2).getShardOrNull(new ShardId(index, 0)); + replica.flush(new FlushRequest()); + logger.info("--> performed extra flushing on replica"); + } + + // shut down the replica node to be tested later + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2)); + + // Corrupt the translog file(s) + logger.info("--> corrupting translog"); + corruptRandomTranslogFiles(indexName); + + // Restart the single node + logger.info("--> restarting node"); + internalCluster().restartRandomDataNode(); + + // all shards should be failed due to a corrupted translog + assertBusy(() -> { + final ClusterAllocationExplanation explanation = + client().admin().cluster().prepareAllocationExplain() + .setIndex(indexName).setShard(0).setPrimary(true) + .get().getExplanation(); + + final UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + assertThat(unassignedInfo.getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); + }); + + // have to shut down primary node - otherwise node lock is present + final InternalTestCluster.RestartCallback callback = + new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) 
throws Exception { + // and we can actually truncate the translog + for (Path translogDir : translogDirs) { + final Path idxLocation = translogDir.getParent().resolve(ShardPath.INDEX_FOLDER_NAME); + assertBusy(() -> { + logger.info("--> checking that lock has been released for {}", idxLocation); + try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE); + Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { + // Great, do nothing, we just wanted to obtain the lock + } catch (LockObtainFailedException lofe) { + logger.info("--> failed acquiring lock for {}", idxLocation); + fail("still waiting for lock release at [" + idxLocation + "]"); + } catch (IOException ioe) { + fail("Got an IOException: " + ioe); + } + }); + + final Settings defaultSettings = internalCluster().getDefaultSettings(); + final Environment environment = TestEnvironment.newEnvironment(defaultSettings); + + terminal.addTextInput("y"); + OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); + logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); + command.execute(terminal, options, environment); + logger.info("--> output:\n{}", terminal.getOutput()); + } + + return super.onNodeStopped(nodeName); + } + }; + internalCluster().restartNode(node1, callback); + + String primaryNodeId = null; + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + final DiscoveryNodes nodes = state.nodes(); + for (ObjectObjectCursor cursor : nodes.getNodes()) { + final String name = cursor.value.getName(); + if (name.equals(node1)) { + primaryNodeId = cursor.key; + break; + } + } + assertThat(primaryNodeId, notNullValue()); + + assertThat(terminal.getOutput(), containsString("allocate_stale_primary")); + assertThat(terminal.getOutput(), containsString("\"node\" : \"" + primaryNodeId + "\"")); + + // there is only _stale_ primary (due to new allocation id) + assertBusy(() -> { + final ClusterAllocationExplanation explanation = + client().admin().cluster().prepareAllocationExplain() + .setIndex(indexName).setShard(0).setPrimary(true) + .get().getExplanation(); + + final ShardAllocationDecision shardAllocationDecision = explanation.getShardAllocationDecision(); + assertThat(shardAllocationDecision.isDecisionTaken(), equalTo(true)); + assertThat(shardAllocationDecision.getAllocateDecision().getAllocationDecision(), + equalTo(AllocationDecision.NO_VALID_SHARD_COPY)); + }); + + client().admin().cluster().prepareReroute() + .add(new AllocateStalePrimaryAllocationCommand(indexName, 0, primaryNodeId, true)) + .get(); + + assertBusy(() -> { + final ClusterAllocationExplanation explanation = + client().admin().cluster().prepareAllocationExplain() + .setIndex(indexName).setShard(0).setPrimary(true) + .get().getExplanation(); + + assertThat(explanation.getCurrentNode(), notNullValue()); + assertThat(explanation.getShardState(), equalTo(ShardRoutingState.STARTED)); + }); + + ensureYellow(indexName); + + // Run a search and make sure it succeeds + assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocsToKeep); + + logger.info("--> starting the replica node to test recovery"); + internalCluster().startNode(); + ensureGreen(indexName); + for (String node : internalCluster().nodesInclude(indexName)) { + SearchRequestBuilder q = client().prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery()); + assertHitCount(q.get(), numDocsToKeep); + } + final RecoveryResponse recoveryResponse = 
client().admin().indices().prepareRecoveries(indexName).setActiveOnly(false).get(); + final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get(indexName).stream() + .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get(); + assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0)); + // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit. + final SeqNoStats seqNoStats = getSeqNoStats(indexName, 0); + assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + } + + public void testCorruptTranslogTruncationOfReplica() throws Exception { + internalCluster().startNodes(2, Settings.EMPTY); + + final String node1 = internalCluster().getNodeNames()[0]; + final String node2 = internalCluster().getNodeNames()[1]; + logger.info("--> nodes name: {}, {}", node1, node2); + + final String indexName = "test"; + assertAcked(prepareCreate(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") + .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog + .put("index.routing.allocation.exclude._name", node2) + )); + ensureYellow(); + + assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() + .put("index.routing.allocation.exclude._name", (String)null) + )); + ensureGreen(); + + // Index some documents + int numDocsToKeep = randomIntBetween(0, 100); + logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(indexName, "type").setSource("foo", "bar"); + } + indexRandom(false, false, false, Arrays.asList(builders)); + flush(indexName); + disableTranslogFlush(indexName); + // having no extra docs is an interesting case for seq no based recoveries - test it more often + int numDocsToTruncate = randomBoolean() ? 0 : randomIntBetween(0, 100); + logger.info("--> indexing [{}] more docs to be truncated", numDocsToTruncate); + builders = new IndexRequestBuilder[numDocsToTruncate]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(indexName, "type").setSource("foo", "bar"); + } + indexRandom(false, false, false, Arrays.asList(builders)); + final int totalDocs = numDocsToKeep + numDocsToTruncate; + + // sample the replica node translog dirs + final ShardId shardId = new ShardId(resolveIndex(indexName), 0); + final Set translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME); + + // stop the cluster nodes. 
we don't use full restart so the node start up order will be the same
+        // and shard roles will be maintained
+        internalCluster().stopRandomDataNode();
+        internalCluster().stopRandomDataNode();
+
+        // Corrupt the translog file(s)
+        logger.info("--> corrupting translog");
+        TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs);
+
+        // Restart the single node
+        logger.info("--> starting node");
+        internalCluster().startNode();
+
+        ensureYellow();
+
+        // Run a search and make sure it succeeds
+        assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), totalDocs);
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final MockTerminal terminal = new MockTerminal();
+        final OptionParser parser = command.getParser();
+
+        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+
+        internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
+            @Override
+            public Settings onNodeStopped(String nodeName) throws Exception {
+                logger.info("--> node {} stopped", nodeName);
+                for (Path translogDir : translogDirs) {
+                    final Path idxLocation = translogDir.getParent().resolve(ShardPath.INDEX_FOLDER_NAME);
+                    assertBusy(() -> {
+                        logger.info("--> checking that lock has been released for {}", idxLocation);
+                        try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
+                             Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
+                            // Great, do nothing, we just wanted to obtain the lock
+                        } catch (LockObtainFailedException lofe) {
+                            logger.info("--> failed acquiring lock for {}", idxLocation);
+                            fail("still waiting for lock release at [" + idxLocation + "]");
+                        } catch (IOException ioe) {
+                            fail("Got an IOException: " + ioe);
+                        }
+                    });
+
+                    terminal.addTextInput("y");
+                    OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
+                    logger.info("--> running command for [{}]", translogDir.toAbsolutePath());
+                    command.execute(terminal, options, environment);
+                    logger.info("--> output:\n{}", terminal.getOutput());
+                }
+
+                return super.onNodeStopped(nodeName);
+            }
+        });
+
+        logger.info("--> starting the replica node to test recovery");
+        internalCluster().startNode();
+        ensureGreen(indexName);
+        for (String node : internalCluster().nodesInclude(indexName)) {
+            assertHitCount(client().prepareSearch(indexName)
+                .setPreference("_only_nodes:" + node).setQuery(matchAllQuery()).get(), totalDocs);
+        }
+
+        final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(indexName).setActiveOnly(false).get();
+        final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get(indexName).stream()
+            .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get();
+        // the replica translog was disabled so it doesn't know what the global checkpoint is and thus can't do ops based recovery
+        assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0));
+        // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit.
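+        // (the tool replaces the corrupted translog with a new empty one tied to the last commit, so no
+        // operations above the last commit's max seqno survive)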
+        final SeqNoStats seqNoStats = getSeqNoStats(indexName, 0);
+        assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
+        assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
+    }
+
+    public void testResolvePath() throws Exception {
+        final int numOfNodes = randomIntBetween(1, 5);
+        final List<String> nodeNames = internalCluster().startNodes(numOfNodes, Settings.EMPTY);
+
+        final String indexName = "test" + randomInt(100);
+        assertAcked(prepareCreate(indexName).setSettings(Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numOfNodes - 1)
+        ));
+        flush(indexName);
+
+        ensureGreen(indexName);
+
+        final Map<String, String> nodeNameToNodeId = new HashMap<>();
+        final ClusterState state = client().admin().cluster().prepareState().get().getState();
+        final DiscoveryNodes nodes = state.nodes();
+        for (ObjectObjectCursor<String, DiscoveryNode> cursor : nodes.getNodes()) {
+            nodeNameToNodeId.put(cursor.value.getName(), cursor.key);
+        }
+
+        final GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{indexName}, false);
+        final List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
+        final ShardRouting shardRouting = iterators.iterator().next().nextOrNull();
+        assertThat(shardRouting, notNullValue());
+        final ShardId shardId = shardRouting.shardId();
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final OptionParser parser = command.getParser();
+
+        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+
+        final Map<String, Path> indexPathByNodeName = new HashMap<>();
+        for (String nodeName : nodeNames) {
+            final String nodeId = nodeNameToNodeId.get(nodeName);
+            final Set<Path> indexDirs = getDirs(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME);
+            assertThat(indexDirs, hasSize(1));
+            indexPathByNodeName.put(nodeName, indexDirs.iterator().next());
+
+            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName));
+            logger.info(" -- stopped {}", nodeName);
+        }
+
+        for (String nodeName : nodeNames) {
+            final Path indexPath = indexPathByNodeName.get(nodeName);
+            final OptionSet options = parser.parse("--dir", indexPath.toAbsolutePath().toString());
+            command.findAndProcessShardPath(options, environment,
+                shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath)));
+        }
+    }
+
+    private Set<Path> getDirs(String indexName, String dirSuffix) {
+        ClusterState state = client().admin().cluster().prepareState().get().getState();
+        GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{indexName}, false);
+        List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
+        ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators);
+        ShardRouting shardRouting = shardIterator.nextOrNull();
+        assertNotNull(shardRouting);
+        assertTrue(shardRouting.primary());
+        assertTrue(shardRouting.assignedToNode());
+        String nodeId = shardRouting.currentNodeId();
+        ShardId shardId = shardRouting.shardId();
+        return getDirs(nodeId, shardId, dirSuffix);
+    }
+
+    private Set<Path> getDirs(String nodeId, ShardId shardId, String dirSuffix) {
+        final NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get();
+        final Set<Path> translogDirs = new TreeSet<>();
+        final NodeStats nodeStats = nodeStatses.getNodes().get(0);
+        for (FsInfo.Path fsPath : nodeStats.getFs()) {
+            final String path = fsPath.getPath();
+            final Path p = PathUtils.get(path)
.resolve(NodeEnvironment.INDICES_FOLDER) + .resolve(shardId.getIndex().getUUID()) + .resolve(Integer.toString(shardId.getId())) + .resolve(dirSuffix); + if (Files.isDirectory(p)) { + translogDirs.add(p); + } + } + return translogDirs; + } + + private void corruptRandomTranslogFiles(String indexName) throws IOException { + Set translogDirs = getDirs(indexName, ShardPath.TRANSLOG_FOLDER_NAME); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs); + } + + /** Disables translog flushing for the specified index */ + private static void disableTranslogFlush(String index) { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) + .build(); + client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); + } + + private SeqNoStats getSeqNoStats(String index, int shardId) { + final ShardStats[] shardStats = client().admin().indices() + .prepareStats(index).get() + .getIndices().get(index).getShards(); + return shardStats[shardId].getSeqNoStats(); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java new file mode 100644 index 0000000000000..7b43d42b2c216 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -0,0 +1,409 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import org.apache.lucene.store.BaseDirectoryWrapper; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.TestTranslog; +import org.elasticsearch.index.translog.TranslogCorruptedException; +import org.elasticsearch.test.CorruptionUtils; +import org.elasticsearch.test.DummyShardLock; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; + +public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase { + + private ShardId shardId; + private ShardRouting routing; + private Path dataDir; + private Environment environment; + private Settings settings; + private ShardPath shardPath; + private IndexMetaData indexMetaData; + private IndexShard indexShard; + private Path translogPath; + private Path indexPath; + + @Before + public void setup() throws IOException { + shardId = new ShardId("index0", "_na_", 0); + final String nodeId = randomAlphaOfLength(10); + routing = TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE); + + dataDir = createTempDir(); + + environment = + TestEnvironment.newEnvironment(Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), dataDir) + .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString()).build()); + + // create same directory structure as prod does + final Path path = NodeEnvironment.resolveNodePath(dataDir, 0); + Files.createDirectories(path); + settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path); + shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); + final IndexMetaData.Builder metaData = IndexMetaData.builder(routing.getIndexName()) + .settings(settings) + .primaryTerm(0, 
randomIntBetween(1, 100))
+            .putMapping("_doc", "{ \"properties\": {} }");
+        indexMetaData = metaData.build();
+
+        indexShard = newStartedShard(p ->
+            newShard(routing, shardPath, indexMetaData, null, null,
+                new InternalEngineFactory(), () -> {
+                }, EMPTY_EVENT_LISTENER),
+            true);
+
+        translogPath = shardPath.resolveTranslog();
+        indexPath = shardPath.resolveIndex();
+    }
+
+    public void testShardLock() throws Exception {
+        indexDocs(indexShard, true);
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final MockTerminal t = new MockTerminal();
+        final OptionParser parser = command.getParser();
+
+        // Try running it before the shard is closed, it should flip out because it can't acquire the lock
+        try {
+            final OptionSet options = parser.parse("-d", indexPath.toString());
+            command.execute(t, options, environment);
+            fail("expected the command to fail not being able to acquire the lock");
+        } catch (Exception e) {
+            assertThat(e.getMessage(), containsString("Failed to lock shard's directory"));
+        }
+
+        // close shard
+        closeShards(indexShard);
+
+        // Try running it before the shard is corrupted
+        try {
+            final OptionSet options = parser.parse("-d", indexPath.toString());
+            command.execute(t, options, environment);
+            fail("expected the command to fail not being able to find a corrupt file marker");
+        } catch (ElasticsearchException e) {
+            assertThat(e.getMessage(), startsWith("Shard does not seem to be corrupted at"));
+            assertThat(t.getOutput(), containsString("Lucene index is clean at"));
+        }
+    }
+
+    public void testCorruptedIndex() throws Exception {
+        final int numDocs = indexDocs(indexShard, true);
+
+        // close shard
+        closeShards(indexShard);
+
+        final boolean corruptSegments = randomBoolean();
+        CorruptionUtils.corruptIndex(random(), indexPath, corruptSegments);
+
+        // test corrupted shard
+        final IndexShard corruptedShard = reopenIndexShard(true);
+        allowShardFailures();
+        expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true));
+        closeShards(corruptedShard);
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final MockTerminal t = new MockTerminal();
+        final OptionParser parser = command.getParser();
+
+        // run command with dry-run
+        t.addTextInput("n"); // "n" means dry run
+        final OptionSet options = parser.parse("-d", indexPath.toString());
+        t.setVerbosity(Terminal.Verbosity.VERBOSE);
+        try {
+            command.execute(t, options, environment);
+            fail();
+        } catch (ElasticsearchException e) {
+            if (corruptSegments) {
+                assertThat(e.getMessage(), is("Index is unrecoverable"));
+            } else {
+                assertThat(e.getMessage(), containsString("aborted by user"));
+            }
+        }
+
+        logger.info("--> output:\n{}", t.getOutput());
+
+        if (corruptSegments == false) {
+
+            // run command without dry-run
+            t.addTextInput("y");
+            command.execute(t, options, environment);
+
+            final String output = t.getOutput();
+            logger.info("--> output:\n{}", output);
+
+            // reopen shard
+            failOnShardFailures();
+            final IndexShard newShard = newStartedShard(p -> reopenIndexShard(false), true);
+
+            final Set<String> shardDocUIDs = getShardDocUIDs(newShard);
+
+            final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?<docs>\\d+) documents will be lost.");
+            final Matcher matcher = pattern.matcher(output);
+            assertThat(matcher.find(), equalTo(true));
+            final int expectedNumDocs = numDocs - Integer.parseInt(matcher.group("docs"));
+
+            assertThat(shardDocUIDs.size(), equalTo(expectedNumDocs));
+
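+            // the reopened shard must contain exactly the documents that survived the corrupted segments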
+            closeShards(newShard);
+        }
+    }
+
+    public void testCorruptedTranslog() throws Exception {
+        final int numDocsToKeep = indexDocs(indexShard, false);
+
+        // close shard
+        closeShards(indexShard);
+
+        TestTranslog.corruptRandomTranslogFile(logger, random(), Arrays.asList(translogPath));
+
+        // test corrupted shard
+        final IndexShard corruptedShard = reopenIndexShard(true);
+
+        allowShardFailures();
+        // it has to fail on start up due to index.shard.check_on_startup = checksum
+        final Exception exception = expectThrows(Exception.class, () -> newStartedShard(p -> corruptedShard, true));
+        final Throwable cause = exception.getCause() instanceof EngineException ? exception.getCause().getCause() : exception.getCause();
+        assertThat(cause, instanceOf(TranslogCorruptedException.class));
+
+        closeShards(corruptedShard);
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final MockTerminal t = new MockTerminal();
+        final OptionParser parser = command.getParser();
+
+        final OptionSet options = parser.parse("-d", translogPath.toString());
+        // run command with dry-run
+        t.addTextInput("n"); // "n" means dry run
+        t.setVerbosity(Terminal.Verbosity.VERBOSE);
+        try {
+            command.execute(t, options, environment);
+            fail();
+        } catch (ElasticsearchException e) {
+            assertThat(e.getMessage(), containsString("aborted by user"));
+            assertThat(t.getOutput(), containsString("Continue and remove corrupted data from the shard ?"));
+        }
+
+        logger.info("--> output:\n{}", t.getOutput());
+
+        // run command without dry-run
+        t.reset();
+        t.addTextInput("y");
+        command.execute(t, options, environment);
+
+        final String output = t.getOutput();
+        logger.info("--> output:\n{}", output);
+
+        // reopen shard
+        failOnShardFailures();
+        final IndexShard newShard = newStartedShard(p -> reopenIndexShard(false), true);
+
+        final Set<String> shardDocUIDs = getShardDocUIDs(newShard);
+
+        assertThat(shardDocUIDs.size(), equalTo(numDocsToKeep));
+
+        closeShards(newShard);
+    }
+
+    public void testCorruptedBothIndexAndTranslog() throws Exception {
+        // index some docs in several segments
+        final int numDocsToKeep = indexDocs(indexShard, false);
+
+        // close shard
+        closeShards(indexShard);
+
+        CorruptionUtils.corruptIndex(random(), indexPath, false);
+
+        // test corrupted shard
+        final IndexShard corruptedShard = reopenIndexShard(true);
+        allowShardFailures();
+        expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true));
+        closeShards(corruptedShard);
+
+        TestTranslog.corruptRandomTranslogFile(logger, random(), Arrays.asList(translogPath));
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final MockTerminal t = new MockTerminal();
+        final OptionParser parser = command.getParser();
+
+        final OptionSet options = parser.parse("-d", translogPath.toString());
+        // run command with dry-run
+        t.addTextInput("n"); // "n" means dry run
+        t.addTextInput("n"); // "n" means dry run
+        t.setVerbosity(Terminal.Verbosity.VERBOSE);
+        try {
+            command.execute(t, options, environment);
+            fail();
+        } catch (ElasticsearchException e) {
+            assertThat(e.getMessage(), containsString("aborted by user"));
+            assertThat(t.getOutput(), containsString("Continue and remove corrupted data from the shard ?"));
+        }
+
+        logger.info("--> output:\n{}", t.getOutput());
+
+        // run command without dry-run
+        t.reset();
+        t.addTextInput("y");
+        command.execute(t, options, environment);
+
+        final String output = t.getOutput();
+        logger.info("--> output:\n{}", output);
+
+        // reopen shard
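+        // failOnShardFailures() re-arms the strict failure listener that allowShardFailures() relaxed above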
+        failOnShardFailures();
+        final IndexShard newShard = newStartedShard(p -> reopenIndexShard(false), true);
+
+        final Set<String> shardDocUIDs = getShardDocUIDs(newShard);
+
+        final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?<docs>\\d+) documents will be lost.");
+        final Matcher matcher = pattern.matcher(output);
+        assertThat(matcher.find(), equalTo(true));
+        final int expectedNumDocs = numDocsToKeep - Integer.parseInt(matcher.group("docs"));
+
+        assertThat(shardDocUIDs.size(), equalTo(expectedNumDocs));
+
+        closeShards(newShard);
+    }
+
+    public void testResolveIndexDirectory() throws Exception {
+        // index a single doc to have files on a disk
+        indexDoc(indexShard, "_doc", "0", "{}");
+        flushShard(indexShard, true);
+        writeIndexState();
+
+        // close shard
+        closeShards(indexShard);
+
+        final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
+        final OptionParser parser = command.getParser();
+
+        // `--index index_name --shard-id 0` has to be resolved to indexPath
+        final OptionSet options = parser.parse("--index", shardId.getIndex().getName(),
+            "--shard-id", Integer.toString(shardId.id()));
+
+        command.findAndProcessShardPath(options, environment,
+            shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath)));
+
+        final OptionSet options2 = parser.parse("--dir", indexPath.toAbsolutePath().toString());
+        command.findAndProcessShardPath(options2, environment,
+            shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath)));
+    }
+
+    private IndexShard reopenIndexShard(boolean corrupted) throws IOException {
+        // open shard with the same location
+        final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(),
+            RecoverySource.ExistingStoreRecoverySource.INSTANCE
+        );
+
+        final IndexMetaData metaData = IndexMetaData.builder(indexMetaData)
+            .settings(Settings.builder()
+                .put(indexShard.indexSettings().getSettings())
+                .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum"))
+            .build();
+
+        CheckedFunction<IndexSettings, Store, IOException> storeProvider =
+            corrupted == false ?
null : + indexSettings -> { + final ShardId shardId = shardPath.getShardId(); + final BaseDirectoryWrapper baseDirectoryWrapper = newFSDirectory(shardPath.resolveIndex()); + // index is corrupted - don't even try to check index on close - it fails + baseDirectoryWrapper.setCheckIndexOnClose(false); + return new Store(shardId, indexSettings, baseDirectoryWrapper, new DummyShardLock(shardId)); + }; + + return newShard(shardRouting, shardPath, metaData, storeProvider, null, + indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + } + + private int indexDocs(IndexShard indexShard, boolean flushLast) throws IOException { + // index some docs in several segments + int numDocs = 0; + int numDocsToKeep = 0; + for (int i = 0, attempts = randomIntBetween(5, 10); i < attempts; i++) { + final int numExtraDocs = between(10, 100); + for (long j = 0; j < numExtraDocs; j++) { + indexDoc(indexShard, "_doc", Long.toString(numDocs + j), "{}"); + } + numDocs += numExtraDocs; + + if (flushLast || i < attempts - 1) { + numDocsToKeep += numExtraDocs; + flushShard(indexShard, true); + } + } + + logger.info("--> indexed {} docs, {} to keep", numDocs, numDocsToKeep); + + writeIndexState(); + return numDocsToKeep; + } + + private void writeIndexState() throws IOException { + // create _state of IndexMetaData + try(NodeEnvironment nodeEnvironment = new NodeEnvironment(environment.settings(), environment, nId -> {})) { + final Path[] paths = nodeEnvironment.indexPaths(indexMetaData.getIndex()); + IndexMetaData.FORMAT.write(indexMetaData, paths); + logger.info("--> index metadata persisted to {} ", Arrays.toString(paths)); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index 7d548fc42d695..71284792a6817 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -125,7 +125,7 @@ private void corruptRandomTranslogFile() throws IOException { } } Path translogDir = RandomPicks.randomFrom(random(), translogDirs); - TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, TestTranslog.minTranslogGenUsedInRecovery(translogDir)); + TestTranslog.corruptRandomTranslogFile(logger, random(), Arrays.asList(translogDir)); } /** Disables translog flushing for the specified index */ diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index f37ec5a8e55d5..0e114233856c0 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -34,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.Collection; import java.util.List; import java.util.Random; import java.util.Set; @@ -52,13 +53,19 @@ public class TestTranslog { static final Pattern TRANSLOG_FILE_PATTERN = Pattern.compile("translog-(\\d+)\\.tlog"); + public static void corruptRandomTranslogFile(Logger logger, Random random, Collection translogDirs) throws IOException { + for (Path translogDir : translogDirs) { + final long minTranslogGen = minTranslogGenUsedInRecovery(translogDir); + corruptRandomTranslogFile(logger, random, translogDir, minTranslogGen); + } + } + /** * Corrupts random translog file 
(translog-N.tlog) from the given translog directory. * * @return a translog file which has been corrupted. */ - public static Path corruptRandomTranslogFile(Logger logger, Random random, Path translogDir, long minGeneration) throws - IOException { + public static Path corruptRandomTranslogFile(Logger logger, Random random, Path translogDir, long minGeneration) throws IOException { Set candidates = new TreeSet<>(); // TreeSet makes sure iteration order is deterministic logger.info("--> Translog dir [{}], minUsedTranslogGen [{}]", translogDir, minGeneration); try (DirectoryStream stream = Files.newDirectoryStream(translogDir)) { diff --git a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java deleted file mode 100644 index cd4605b7e2d01..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.translog; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.store.Lock; -import org.apache.lucene.store.LockObtainFailedException; -import org.apache.lucene.store.NativeFSLockFactory; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.MockEngineFactoryPlugin; -import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.monitor.fs.FsInfo; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.engine.MockEngineSupport; -import org.elasticsearch.test.transport.MockTransportService; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0) -public class TruncateTranslogIT extends ESIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, MockEngineFactoryPlugin.class); - } - - public void testCorruptTranslogTruncation() throws Exception { - internalCluster().startNodes(2, Settings.EMPTY); - - final String replicaNode = internalCluster().getNodeNames()[1]; - - assertAcked(prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1) - 
.put("index.number_of_replicas", 1) - .put("index.refresh_interval", "-1") - .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog - .put("index.routing.allocation.exclude._name", replicaNode) - )); - ensureYellow(); - - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put("index.routing.allocation.exclude._name", (String)null) - )); - ensureGreen(); - - // Index some documents - int numDocsToKeep = randomIntBetween(0, 100); - logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); - IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; - for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar"); - } - indexRandom(false, false, false, Arrays.asList(builders)); - flush("test"); - disableTranslogFlush("test"); - // having no extra docs is an interesting case for seq no based recoveries - test it more often - int numDocsToTruncate = randomBoolean() ? 0 : randomIntBetween(0, 100); - logger.info("--> indexing [{}] more doc to be truncated", numDocsToTruncate); - builders = new IndexRequestBuilder[numDocsToTruncate]; - for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar"); - } - indexRandom(false, false, false, Arrays.asList(builders)); - Set translogDirs = getTranslogDirs("test"); - - TruncateTranslogCommand ttc = new TruncateTranslogCommand(); - MockTerminal t = new MockTerminal(); - OptionParser parser = ttc.getParser(); - - for (Path translogDir : translogDirs) { - OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b"); - // Try running it before the shard is closed, it should flip out because it can't acquire the lock - try { - logger.info("--> running truncate while index is open on [{}]", translogDir.toAbsolutePath()); - ttc.execute(t, options, null /* TODO: env should be real here, and ttc should actually use it... 
*/); - fail("expected the truncate command to fail not being able to acquire the lock"); - } catch (Exception e) { - assertThat(e.getMessage(), containsString("Failed to lock shard's directory")); - } - } - - if (randomBoolean() && numDocsToTruncate > 0) { - // flush the replica, so it will have more docs than what the primary will have - Index index = resolveIndex("test"); - IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaNode).getShardOrNull(new ShardId(index, 0)); - replica.flush(new FlushRequest()); - logger.info("--> performed extra flushing on replica"); - } - - // shut down the replica node to be tested later - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); - - // Corrupt the translog file - logger.info("--> corrupting translog"); - corruptRandomTranslogFile("test"); - - // Restart the single node - logger.info("--> restarting node"); - internalCluster().restartRandomDataNode(); - client().admin().cluster().prepareHealth().setWaitForYellowStatus() - .setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)) - .setWaitForEvents(Priority.LANGUID) - .get(); - - try { - client().prepareSearch("test").setQuery(matchAllQuery()).get(); - fail("all shards should be failed due to a corrupted translog"); - } catch (SearchPhaseExecutionException e) { - // Good, all shards should be failed because there is only a - // single shard and its translog is corrupt - } - - // Close the index so we can actually truncate the translog - logger.info("--> closing 'test' index"); - client().admin().indices().prepareClose("test").get(); - - for (Path translogDir : translogDirs) { - final Path idxLocation = translogDir.getParent().resolve("index"); - assertBusy(() -> { - logger.info("--> checking that lock has been released for {}", idxLocation); - try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE); - Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - // Great, do nothing, we just wanted to obtain the lock - } catch (LockObtainFailedException lofe) { - logger.info("--> failed acquiring lock for {}", idxLocation); - fail("still waiting for lock release at [" + idxLocation + "]"); - } catch (IOException ioe) { - fail("Got an IOException: " + ioe); - } - }); - - OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b"); - logger.info("--> running truncate translog command for [{}]", translogDir.toAbsolutePath()); - ttc.execute(t, options, null /* TODO: env should be real here, and ttc should actually use it... 
*/); - logger.info("--> output:\n{}", t.getOutput()); - } - - // Re-open index - logger.info("--> opening 'test' index"); - client().admin().indices().prepareOpen("test").get(); - ensureYellow("test"); - - // Run a search and make sure it succeeds - assertHitCount(client().prepareSearch("test").setQuery(matchAllQuery()).get(), numDocsToKeep); - - logger.info("--> starting the replica node to test recovery"); - internalCluster().startNode(); - ensureGreen("test"); - for (String node : internalCluster().nodesInclude("test")) { - SearchRequestBuilder q = client().prepareSearch("test").setPreference("_only_nodes:" + node).setQuery(matchAllQuery()); - assertHitCount(q.get(), numDocsToKeep); - } - final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").setActiveOnly(false).get(); - final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get("test").stream() - .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get(); - assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0)); - // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit. - final SeqNoStats seqNoStats = getSeqNoStats("test", 0); - assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); - assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); - } - - public void testCorruptTranslogTruncationOfReplica() throws Exception { - internalCluster().startNodes(2, Settings.EMPTY); - - final String primaryNode = internalCluster().getNodeNames()[0]; - final String replicaNode = internalCluster().getNodeNames()[1]; - - assertAcked(prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .put("index.refresh_interval", "-1") - .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog - .put("index.routing.allocation.exclude._name", replicaNode) - )); - ensureYellow(); - - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put("index.routing.allocation.exclude._name", (String)null) - )); - ensureGreen(); - - // Index some documents - int numDocsToKeep = randomIntBetween(0, 100); - logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); - IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; - for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar"); - } - indexRandom(false, false, false, Arrays.asList(builders)); - flush("test"); - disableTranslogFlush("test"); - // having no extra docs is an interesting case for seq no based recoveries - test it more often - int numDocsToTruncate = randomBoolean() ? 
0 : randomIntBetween(0, 100); - logger.info("--> indexing [{}] more docs to be truncated", numDocsToTruncate); - builders = new IndexRequestBuilder[numDocsToTruncate]; - for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar"); - } - indexRandom(false, false, false, Arrays.asList(builders)); - final int totalDocs = numDocsToKeep + numDocsToTruncate; - - - // sample the replica node translog dirs - final ShardId shardId = new ShardId(resolveIndex("test"), 0); - Set translogDirs = getTranslogDirs(replicaNode, shardId); - Path tdir = randomFrom(translogDirs); - - // stop the cluster nodes. we don't use full restart so the node start up order will be the same - // and shard roles will be maintained - internalCluster().stopRandomDataNode(); - internalCluster().stopRandomDataNode(); - - // Corrupt the translog file - logger.info("--> corrupting translog"); - TestTranslog.corruptRandomTranslogFile(logger, random(), tdir, TestTranslog.minTranslogGenUsedInRecovery(tdir)); - - // Restart the single node - logger.info("--> starting node"); - internalCluster().startNode(); - - ensureYellow(); - - // Run a search and make sure it succeeds - assertHitCount(client().prepareSearch("test").setQuery(matchAllQuery()).get(), totalDocs); - - TruncateTranslogCommand ttc = new TruncateTranslogCommand(); - MockTerminal t = new MockTerminal(); - OptionParser parser = ttc.getParser(); - - for (Path translogDir : translogDirs) { - final Path idxLocation = translogDir.getParent().resolve("index"); - assertBusy(() -> { - logger.info("--> checking that lock has been released for {}", idxLocation); - try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE); - Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - // Great, do nothing, we just wanted to obtain the lock - } catch (LockObtainFailedException lofe) { - logger.info("--> failed acquiring lock for {}", idxLocation); - fail("still waiting for lock release at [" + idxLocation + "]"); - } catch (IOException ioe) { - fail("Got an IOException: " + ioe); - } - }); - - OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b"); - logger.info("--> running truncate translog command for [{}]", translogDir.toAbsolutePath()); - ttc.execute(t, options, null /* TODO: env should be real here, and ttc should actually use it... */); - logger.info("--> output:\n{}", t.getOutput()); - } - - logger.info("--> starting the replica node to test recovery"); - internalCluster().startNode(); - ensureGreen("test"); - for (String node : internalCluster().nodesInclude("test")) { - assertHitCount(client().prepareSearch("test").setPreference("_only_nodes:" + node).setQuery(matchAllQuery()).get(), totalDocs); - } - - final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").setActiveOnly(false).get(); - final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get("test").stream() - .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get(); - // the replica translog was disabled so it doesn't know what hte global checkpoint is and thus can't do ops based recovery - assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0)); - // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit. 
- final SeqNoStats seqNoStats = getSeqNoStats("test", 0); - assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); - assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); - } - - private Set getTranslogDirs(String indexName) throws IOException { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{indexName}, false); - List iterators = iterableAsArrayList(shardIterators); - ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); - ShardRouting shardRouting = shardIterator.nextOrNull(); - assertNotNull(shardRouting); - assertTrue(shardRouting.primary()); - assertTrue(shardRouting.assignedToNode()); - String nodeId = shardRouting.currentNodeId(); - ShardId shardId = shardRouting.shardId(); - return getTranslogDirs(nodeId, shardId); - } - - private Set getTranslogDirs(String nodeId, ShardId shardId) { - NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get(); - Set translogDirs = new TreeSet<>(); // treeset makes sure iteration order is deterministic - for (FsInfo.Path fsPath : nodeStatses.getNodes().get(0).getFs()) { - String path = fsPath.getPath(); - final String relativeDataLocationPath = "indices/"+ shardId.getIndex().getUUID() +"/" + Integer.toString(shardId.getId()) - + "/translog"; - Path translogPath = PathUtils.get(path).resolve(relativeDataLocationPath); - if (Files.isDirectory(translogPath)) { - translogDirs.add(translogPath); - } - } - return translogDirs; - } - - private void corruptRandomTranslogFile(String indexName) throws IOException { - Set translogDirs = getTranslogDirs(indexName); - Path translogDir = randomFrom(translogDirs); - TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, TestTranslog.minTranslogGenUsedInRecovery(translogDir)); - } - - /** Disables translog flushing for the specified index */ - private static void disableTranslogFlush(String index) { - Settings settings = Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) - .build(); - client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); - } - - private SeqNoStats getSeqNoStats(String index, int shardId) { - final ShardStats[] shardStats = client().admin().indices() - .prepareStats(index).get() - .getIndices().get(index).getShards(); - return shardStats[shardId].getSeqNoStats(); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 78ce5bc500ce8..540b68ee40932 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -133,7 +133,7 @@ public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(getClass().getName(), threadPoolSettings()); primaryTerm = randomIntBetween(1, 100); // use random but fixed term for creating shards - failOnShardFailures.set(true); + failOnShardFailures(); } @Override @@ -154,6 +154,10 @@ protected void allowShardFailures() { failOnShardFailures.set(false); } + protected void failOnShardFailures() { + failOnShardFailures.set(true); + } + public Settings threadPoolSettings() { return Settings.EMPTY; } @@ -233,7 +237,7 @@ 
protected IndexShard newShard( .settings(indexSettings) .primaryTerm(0, primaryTerm) .putMapping("_doc", "{ \"properties\": {} }"); - return newShard(shardRouting, metaData.build(), engineFactory, listeners); + return newShard(shardRouting, metaData.build(), null, engineFactory, () -> {}, listeners); } /** @@ -279,7 +283,6 @@ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, I return newShard(shardRouting, indexMetaData, searcherWrapper, new InternalEngineFactory(), globalCheckpointSyncer); } - /** * creates a new initializing shard. The shard will will be put in its proper path under the * current node id the shard is assigned to. diff --git a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java index df306dfc9e32c..59c4c942fe259 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java @@ -21,6 +21,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; @@ -47,6 +48,23 @@ public final class CorruptionUtils { private static Logger logger = ESLoggerFactory.getLogger("test"); private CorruptionUtils() {} + public static void corruptIndex(Random random, Path indexPath, boolean corruptSegments) throws IOException { + // corrupt files + final Path[] filesToCorrupt = + Files.walk(indexPath) + .filter(p -> { + final String name = p.getFileName().toString(); + boolean segmentFile = name.startsWith("segments_") || name.endsWith(".si"); + return Files.isRegularFile(p) + && name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS + && IndexWriter.WRITE_LOCK_NAME.equals(name) == false + && (corruptSegments ? segmentFile : segmentFile == false); + } + ) + .toArray(Path[]::new); + corruptFile(random, filesToCorrupt); + } + /** * Corrupts a random file at a random position */ From 6f3b3338ba73f1ebc50836758b772d7c57b8852f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 19 Sep 2018 10:34:53 +0200 Subject: [PATCH 42/46] [Docs] Clarify accessing Date methods in painless (#33560) The documentation currently tells users to use `doc['event_date'].value.getMillis` to access milliseconds in a date. It turns out the way it works is `doc['event_date'].value.millis`. This change corrects this and gives a hint at how other date related methods work. --- docs/painless/painless-getting-started.asciidoc | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index b325fcf5549cc..936bd8e198c72 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -198,13 +198,11 @@ POST hockey/player/1/_update ==== Dates Date fields are exposed as -`ReadableDateTime` or -so they support methods like -`getYear`, -and `getDayOfWeek`. -To get milliseconds since epoch call -`getMillis`. -For example, the following returns every hockey player's birth year: +`ReadableDateTime`, so they support methods like `getYear`, `getDayOfWeek` +or e.g. getting milliseconds since epoch with `getMillis`. 
To use these +in a script, leave out the `get` prefix and continue with lowercasing the +rest of the method name. For example, the following returns every hockey +player's birth year: [source,js] ---------------------------------------------------------------- From 0c77f45dc66a448fc37e5cf8a1984282083fc28f Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 19 Sep 2018 11:03:11 +0200 Subject: [PATCH 43/46] Move DocsStats into Engine (#33835) By moving DocStats into the engine we can easily cache the stats for read-only engines if necessary. It also moves the responsibility out of IndexShard which has quite some complexity already. --- .../elasticsearch/index/engine/Engine.java | 36 +++++++++++++++++++ .../index/engine/ReadOnlyEngine.java | 8 +++++ .../elasticsearch/index/shard/IndexShard.java | 33 ++--------------- .../index/shard/IndexShardTests.java | 14 +++++++- 4 files changed, 60 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index fc693113fee53..f513a8577b6a1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -175,6 +176,41 @@ public MergeStats getMergeStats() { /** Returns how many bytes we are currently moving from heap to disk */ public abstract long getWritingBytes(); + /** + * Returns the {@link DocsStats} for this engine + */ + public DocsStats docStats() { + // we calculate the doc stats based on the internal reader that is more up-to-date and not subject + // to external refreshes. For instance we don't refresh an external reader if we flush and indices with + // index.refresh_interval=-1 won't see any doc stats updates at all. This change will give more accurate statistics + // when indexing but not refreshing in general. Yet, if a refresh happens the internal reader is refresh as well so we are + // safe here.
+ try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) { + return docsStats(searcher.reader()); + } + } + + protected final DocsStats docsStats(IndexReader indexReader) { + long numDocs = 0; + long numDeletedDocs = 0; + long sizeInBytes = 0; + // we don't wait for a pending refreshes here since it's a stats call instead we mark it as accessed only which will cause + // the next scheduled refresh to go through and refresh the stats as well + for (LeafReaderContext readerContext : indexReader.leaves()) { + // we go on the segment level here to get accurate numbers + final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader()); + SegmentCommitInfo info = segmentReader.getSegmentInfo(); + numDocs += readerContext.reader().numDocs(); + numDeletedDocs += readerContext.reader().numDeletedDocs(); + try { + sizeInBytes += info.sizeInBytes(); + } catch (IOException e) { + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + } + } + return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); + } + /** * A throttling class that can be activated, causing the * {@code acquireThrottle} method to block on a lock when throttling diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index b958bd84b76a6..80b653939299f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogStats; @@ -63,6 +64,7 @@ public final class ReadOnlyEngine extends Engine { private final SearcherManager searcherManager; private final IndexCommit indexCommit; private final Lock indexWriterLock; + private final DocsStats docsStats; /** * Creates a new ReadOnlyEngine. 
This ctor can also be used to open a read-only engine on top of an already opened @@ -101,6 +103,7 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats this.indexCommit = reader.getIndexCommit(); this.searcherManager = new SearcherManager(reader, new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); + this.docsStats = docsStats(reader); this.indexWriterLock = indexWriterLock; success = true; } finally { @@ -365,4 +368,9 @@ public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) { @Override public void maybePruneDeletes() { } + + @Override + public DocsStats docStats() { + return docsStats; + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 168444a226750..51549b439a37b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -21,13 +21,9 @@ import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; @@ -879,32 +875,9 @@ public FlushStats flushStats() { } public DocsStats docStats() { - // we calculate the doc stats based on the internal reader that is more up-to-date and not subject - // to external refreshes. For instance we don't refresh an external reader if we flush and indices with - // index.refresh_interval=-1 won't see any doc stats updates at all. This change will give more accurate statistics - // when indexing but not refreshing in general. Yet, if a refresh happens the internal reader is refresh as well so we are - // safe here. 
- long numDocs = 0; - long numDeletedDocs = 0; - long sizeInBytes = 0; - try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) { - // we don't wait for a pending refreshes here since it's a stats call instead we mark it as accessed only which will cause - // the next scheduled refresh to go through and refresh the stats as well - markSearcherAccessed(); - for (LeafReaderContext reader : searcher.reader().leaves()) { - // we go on the segment level here to get accurate numbers - final SegmentReader segmentReader = Lucene.segmentReader(reader.reader()); - SegmentCommitInfo info = segmentReader.getSegmentInfo(); - numDocs += reader.reader().numDocs(); - numDeletedDocs += reader.reader().numDeletedDocs(); - try { - sizeInBytes += info.sizeInBytes(); - } catch (IOException e) { - logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); - } - } - } - return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); + DocsStats docsStats = getEngine().docStats(); + markSearcherAccessed(); + return docsStats; } /** diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b74b5343a82a1..c1803619ed5f3 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2438,7 +2438,7 @@ public void testRecoverFromLocalShard() throws IOException { closeShards(sourceShard, targetShard); } - public void testDocStats() throws IOException, InterruptedException { + public void testDocStats() throws Exception { IndexShard indexShard = null; try { indexShard = newStartedShard( @@ -2455,7 +2455,14 @@ public void testDocStats() throws IOException, InterruptedException { indexShard.flush(new FlushRequest()); } { + IndexShard shard = indexShard; + assertBusy(() -> { + ThreadPool threadPool = shard.getThreadPool(); + assertThat(threadPool.relativeTimeInMillis(), greaterThan(shard.getLastSearcherAccess())); + }); + long prevAccessTime = shard.getLastSearcherAccess(); final DocsStats docsStats = indexShard.docStats(); + assertThat("searcher was not marked as accessed", shard.getLastSearcherAccess(), greaterThan(prevAccessTime)); assertThat(docsStats.getCount(), equalTo(numDocs)); try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) { assertTrue(searcher.reader().numDocs() <= docsStats.getCount()); @@ -3412,4 +3419,9 @@ public void testResetEngine() throws Exception { assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); closeShard(shard, false); } + + @Override + public Settings threadPoolSettings() { + return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.estimated_time_interval", "5ms").build(); + } } From 68c0a295787f0f0549b01738740372c0abe4e3a0 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 19 Sep 2018 10:20:21 +0100 Subject: [PATCH 44/46] HLRC: Delete ML calendar (#33775) --- .../client/MLRequestConverters.java | 12 ++++ .../client/MachineLearningClient.java | 41 ++++++++++++ .../client/ml/DeleteCalendarRequest.java | 65 +++++++++++++++++++ .../client/MLRequestConvertersTests.java | 8 +++ .../client/MachineLearningIT.java | 19 ++++++ .../MlClientDocumentationIT.java | 47 ++++++++++++++ .../client/ml/DeleteCalendarRequestTests.java | 43 ++++++++++++ .../high-level/ml/delete-calendar.asciidoc | 59 +++++++++++++++++ 
.../high-level/supported-apis.asciidoc | 2 + 9 files changed, 296 insertions(+) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarRequest.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarRequestTests.java create mode 100644 docs/java-rest/high-level/ml/delete-calendar.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index bc2ff7b17d57b..ed83e1b4aba19 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -28,6 +28,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.client.RequestConverters.EndpointBuilder; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -372,4 +373,15 @@ static Request getCalendars(GetCalendarsRequest getCalendarsRequest) throws IOEx request.setEntity(createEntity(getCalendarsRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } + + static Request deleteCalendar(DeleteCalendarRequest deleteCalendarRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("calendars") + .addPathPart(deleteCalendarRequest.getCalendarId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 5edb5115d857a..06df9b314886d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -910,4 +911,44 @@ public void putCalendarAsync(PutCalendarRequest request, RequestOptions options, listener, Collections.emptySet()); } + + /** + * Deletes the given Machine Learning Calendar + *

+ * For additional info see + * + * ML Delete calendar documentation + * + * @param request The request to delete the calendar + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return action acknowledgement + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public AcknowledgedResponse deleteCalendar(DeleteCalendarRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::deleteCalendar, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Deletes the given Machine Learning Calendar asynchronously and notifies the listener on completion + *

+ * For additional info see + * + * ML Delete calendar documentation + * + * @param request The request to delete the calendar + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::deleteCalendar, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarRequest.java new file mode 100644 index 0000000000000..047561685fbfb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarRequest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +import java.util.Objects; + +/** + * Request to delete a Machine Learning Calendar + */ +public class DeleteCalendarRequest extends ActionRequest { + + private final String calendarId; + + /** + * The constructor requires a single calendar id. + * @param calendarId The calendar to delete. 
Must be {@code non-null} + */ + public DeleteCalendarRequest(String calendarId) { + this.calendarId = Objects.requireNonNull(calendarId, "[calendar_id] must not be null"); + } + + public String getCalendarId() { + return calendarId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(calendarId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + DeleteCalendarRequest other = (DeleteCalendarRequest) obj; + return Objects.equals(calendarId, other.calendarId); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index fdd4200ee81b9..819e2f634494d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -438,6 +439,13 @@ public void testGetCalendars() throws IOException { } } + public void testDeleteCalendar() { + DeleteCalendarRequest deleteCalendarRequest = new DeleteCalendarRequest(randomAlphaOfLength(10)); + Request request = MLRequestConverters.deleteCalendar(deleteCalendarRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/calendars/" + deleteCalendarRequest.getCalendarId(), request.getEndpoint()); + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index e90d541b9c79a..19ca737d6e962 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -517,6 +518,24 @@ public void testGetCalendars() throws Exception { assertEquals(calendar1, getCalendarsResponse.calendars().get(0)); } + public void testDeleteCalendar() throws IOException { + Calendar calendar = CalendarTests.testInstance(); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + execute(new PutCalendarRequest(calendar), machineLearningClient::putCalendar, + machineLearningClient::putCalendarAsync); + + AcknowledgedResponse response = execute(new DeleteCalendarRequest(calendar.getId()), + machineLearningClient::deleteCalendar, 
+ machineLearningClient::deleteCalendarAsync); + assertTrue(response.isAcknowledged()); + + // calendar is missing + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(new DeleteCalendarRequest(calendar.getId()), machineLearningClient::deleteCalendar, + machineLearningClient::deleteCalendarAsync)); + assertThat(exception.status().getStatus(), equalTo(404)); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index ddaf9d8db6cc8..36d5a08d6d368 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -1591,4 +1592,50 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testDeleteCalendar() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + Calendar calendar = new Calendar("holidays", Collections.singletonList("job_1"), "A calendar for public holidays"); + PutCalendarRequest putCalendarRequest = new PutCalendarRequest(calendar); + client.machineLearning().putCalendar(putCalendarRequest, RequestOptions.DEFAULT); + + //tag::x-pack-ml-delete-calendar-request + DeleteCalendarRequest request = new DeleteCalendarRequest("holidays"); // <1> + //end::x-pack-ml-delete-calendar-request + + //tag::x-pack-ml-delete-calendar-execute + AcknowledgedResponse response = client.machineLearning().deleteCalendar(request, RequestOptions.DEFAULT); + //end::x-pack-ml-delete-calendar-execute + + //tag::x-pack-ml-delete-calendar-response + boolean isAcknowledged = response.isAcknowledged(); // <1> + //end::x-pack-ml-delete-calendar-response + + assertTrue(isAcknowledged); + + // tag::x-pack-ml-delete-calendar-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-delete-calendar-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-delete-calendar-execute-async + client.machineLearning().deleteCalendarAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-delete-calendar-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarRequestTests.java new file mode 
100644 index 0000000000000..850fd800c9a8e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +import static org.hamcrest.Matchers.not; + + +public class DeleteCalendarRequestTests extends ESTestCase { + + public void testWithNullId() { + NullPointerException ex = expectThrows(NullPointerException.class, () -> new DeleteCalendarRequest(null)); + assertEquals("[calendar_id] must not be null", ex.getMessage()); + } + + public void testEqualsAndHash() { + String id1 = randomAlphaOfLength(8); + String id2 = id1 + "_a"; + assertThat(new DeleteCalendarRequest(id1), equalTo(new DeleteCalendarRequest(id1))); + assertThat(new DeleteCalendarRequest(id1).hashCode(), equalTo(new DeleteCalendarRequest(id1).hashCode())); + assertThat(new DeleteCalendarRequest(id1), not(equalTo(new DeleteCalendarRequest(id2)))); + assertThat(new DeleteCalendarRequest(id1).hashCode(), not(equalTo(new DeleteCalendarRequest(id2).hashCode()))); + } +} diff --git a/docs/java-rest/high-level/ml/delete-calendar.asciidoc b/docs/java-rest/high-level/ml/delete-calendar.asciidoc new file mode 100644 index 0000000000000..8f25576a96f14 --- /dev/null +++ b/docs/java-rest/high-level/ml/delete-calendar.asciidoc @@ -0,0 +1,59 @@ +[[java-rest-high-x-pack-ml-delete-calendar]] +=== Delete Calendar API +Delete a {ml} calendar. +The API accepts a `DeleteCalendarRequest` and responds +with an `AcknowledgedResponse` object. + +[[java-rest-high-x-pack-ml-delete-calendar-request]] +==== Delete Calendar Request + +A `DeleteCalendarRequest` object requires a non-null `calendarId`.
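+Passing `null` fails fast: the constructor calls `Objects.requireNonNull`, so an invalid
+request can never be sent. A minimal sketch of that contract, mirroring the behaviour
+asserted in `DeleteCalendarRequestTests`:
+
+["source","java"]
+---------------------------------------------------
+// throws a NullPointerException with the message "[calendar_id] must not be null"
+DeleteCalendarRequest invalid = new DeleteCalendarRequest(null);
+---------------------------------------------------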
+ +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-request] +--------------------------------------------------- +<1> Constructing a new request referencing an existing Calendar + +[[java-rest-high-x-pack-ml-delete-calendar-response]] +==== Delete Calendar Response + +The returned `AcknowledgedResponse` object indicates the acknowledgement of the request: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-response] +--------------------------------------------------- +<1> `isAcknowledged` was the deletion request acknowledged or not + +[[java-rest-high-x-pack-ml-delete-calendar-execution]] +==== Execution +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-delete-calendar-async]] +==== Delete Calendar Asynchronously + +This request can also be made asynchronously. +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-execute-async] +--------------------------------------------------- +<1> The `DeleteCalendarRequest` to execute and the `ActionListener` to alert on completion or error. + +The deletion request returns immediately. Once the request is completed, the `ActionListener` is +called back using the `onResponse` or `onFailure`. The latter indicates some failure occurred when +making the request. 
+ +A typical listener for a `DeleteCalendarRequest` could be defined as follows: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-listener] +--------------------------------------------------- +<1> The action to be taken when it is completed +<2> What to do when a failure occurs diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 2c907dd205376..51d00c403de62 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -233,6 +233,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <> * <> * <> +* <> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -255,6 +256,7 @@ include::ml/get-influencers.asciidoc[] include::ml/get-categories.asciidoc[] include::ml/get-calendars.asciidoc[] include::ml/put-calendar.asciidoc[] +include::ml/delete-calendar.asciidoc[] == Migration APIs From d9947c631a9d398172b717b7864e8ffbcebcdb3c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 19 Sep 2018 13:13:20 +0200 Subject: [PATCH 45/46] [CCR] Rename idle_shard_retry_delay to poll_timout in auto follow patterns (#33821) --- .../ccr/action/AutoFollowCoordinator.java | 2 +- .../TransportPutAutoFollowPatternAction.java | 2 +- .../xpack/ccr/action/AutoFollowTests.java | 6 ++-- .../PutAutoFollowPatternRequestTests.java | 2 +- .../xpack/core/ccr/AutoFollowMetadata.java | 28 +++++++++---------- .../action/PutAutoFollowPatternAction.java | 28 +++++++++---------- 6 files changed, 34 insertions(+), 34 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 46679d22520c3..78d21683958de 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -306,7 +306,7 @@ private void followLeaderIndex(String clusterAlias, Index indexToFollow, request.setMaxConcurrentWriteBatches(pattern.getMaxConcurrentWriteBatches()); request.setMaxWriteBufferSize(pattern.getMaxWriteBufferSize()); request.setMaxRetryDelay(pattern.getMaxRetryDelay()); - request.setPollTimeout(pattern.getIdleShardRetryDelay()); + request.setPollTimeout(pattern.getPollTimeout()); // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index 748ba03f034bc..2f9dd02648dc7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -156,7 +156,7 @@ static ClusterState innerPut(PutAutoFollowPatternAction.Request request, request.getMaxConcurrentWriteBatches(), request.getMaxWriteBufferSize(), request.getMaxRetryDelay(), - request.getIdleShardRetryDelay(), + request.getPollTimeout(), filteredHeaders); patterns.put(request.getLeaderClusterAlias(), autoFollowPattern); ClusterState.Builder newState = 
ClusterState.builder(localState); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java index 6cea48cada8db..1b0f15551916b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java @@ -136,7 +136,7 @@ public void testAutoFollowParameterAreDelegated() throws Exception { request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { - request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); + request.setPollTimeout(TimeValue.timeValueMillis(500)); } assertTrue(client().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); @@ -167,8 +167,8 @@ public void testAutoFollowParameterAreDelegated() throws Exception { if (request.getMaxRetryDelay() != null) { assertThat(shardFollowTask.getMaxRetryDelay(), equalTo(request.getMaxRetryDelay())); } - if (request.getIdleShardRetryDelay() != null) { - assertThat(shardFollowTask.getPollTimeout(), equalTo(request.getIdleShardRetryDelay())); + if (request.getPollTimeout() != null) { + assertThat(shardFollowTask.getPollTimeout(), equalTo(request.getPollTimeout())); } }); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java index d3688c1136c6f..6fafaab7995ee 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java @@ -45,7 +45,7 @@ protected PutAutoFollowPatternAction.Request createTestInstance() { request.setFollowIndexNamePattern(randomAlphaOfLength(4)); } if (randomBoolean()) { - request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); + request.setPollTimeout(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index cc4ea7b009ec1..75832271bee5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -171,7 +171,7 @@ public static class AutoFollowPattern implements Writeable, ToXContentObject { public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + public static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); private static final ParseField HEADERS = new ParseField("headers"); @SuppressWarnings("unchecked") @@ -193,8 +193,8 @@ public static class AutoFollowPattern implements Writeable, ToXContentObject { (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); 
PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), - IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), + POLL_TIMEOUT, ObjectParser.ValueType.STRING); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } @@ -206,7 +206,7 @@ public static class AutoFollowPattern implements Writeable, ToXContentObject { private final Integer maxConcurrentWriteBatches; private final Integer maxWriteBufferSize; private final TimeValue maxRetryDelay; - private final TimeValue idleShardRetryDelay; + private final TimeValue pollTimeout; private final Map headers; public AutoFollowPattern(List leaderIndexPatterns, @@ -217,7 +217,7 @@ public AutoFollowPattern(List leaderIndexPatterns, Integer maxConcurrentWriteBatches, Integer maxWriteBufferSize, TimeValue maxRetryDelay, - TimeValue idleShardRetryDelay, + TimeValue pollTimeout, Map headers) { this.leaderIndexPatterns = leaderIndexPatterns; this.followIndexPattern = followIndexPattern; @@ -227,7 +227,7 @@ public AutoFollowPattern(List leaderIndexPatterns, this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; this.maxWriteBufferSize = maxWriteBufferSize; this.maxRetryDelay = maxRetryDelay; - this.idleShardRetryDelay = idleShardRetryDelay; + this.pollTimeout = pollTimeout; this.headers = headers != null ? Collections.unmodifiableMap(headers) : Collections.emptyMap(); } @@ -240,7 +240,7 @@ public AutoFollowPattern(List leaderIndexPatterns, maxConcurrentWriteBatches = in.readOptionalVInt(); maxWriteBufferSize = in.readOptionalVInt(); maxRetryDelay = in.readOptionalTimeValue(); - idleShardRetryDelay = in.readOptionalTimeValue(); + pollTimeout = in.readOptionalTimeValue(); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } @@ -284,8 +284,8 @@ public TimeValue getMaxRetryDelay() { return maxRetryDelay; } - public TimeValue getIdleShardRetryDelay() { - return idleShardRetryDelay; + public TimeValue getPollTimeout() { + return pollTimeout; } public Map getHeaders() { @@ -302,7 +302,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(maxConcurrentWriteBatches); out.writeOptionalVInt(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(idleShardRetryDelay); + out.writeOptionalTimeValue(pollTimeout); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -330,8 +330,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (maxRetryDelay != null) { builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay); } - if (idleShardRetryDelay != null) { - builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay); + if (pollTimeout != null) { + builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout); } builder.field(HEADERS.getPreferredName(), headers); return builder; @@ -355,7 +355,7 @@ public boolean equals(Object o) { Objects.equals(maxConcurrentWriteBatches, that.maxConcurrentWriteBatches) && Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && + Objects.equals(pollTimeout, that.pollTimeout) && Objects.equals(headers, that.headers); } @@ -370,7 +370,7 @@ public int hashCode() { 
maxConcurrentWriteBatches, maxWriteBufferSize, maxRetryDelay, - idleShardRetryDelay, + pollTimeout, headers ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index 01ebd3f1d81f1..93d8d1fb7d1a2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -59,9 +59,9 @@ public static class Request extends AcknowledgedRequest implements ToXC PARSER.declareField(Request::setMaxRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.MAX_RETRY_DELAY.getPreferredName()), AutoFollowPattern.MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); - PARSER.declareField(Request::setIdleShardRetryDelay, - (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName()), - AutoFollowPattern.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + PARSER.declareField(Request::setPollTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.POLL_TIMEOUT.getPreferredName()), + AutoFollowPattern.POLL_TIMEOUT, ObjectParser.ValueType.STRING); } public static Request fromXContent(XContentParser parser, String remoteClusterAlias) throws IOException { @@ -88,7 +88,7 @@ public static Request fromXContent(XContentParser parser, String remoteClusterAl private Integer maxConcurrentWriteBatches; private Integer maxWriteBufferSize; private TimeValue maxRetryDelay; - private TimeValue idleShardRetryDelay; + private TimeValue pollTimeout; @Override public ActionRequestValidationException validate() { @@ -189,12 +189,12 @@ public void setMaxRetryDelay(TimeValue maxRetryDelay) { this.maxRetryDelay = maxRetryDelay; } - public TimeValue getIdleShardRetryDelay() { - return idleShardRetryDelay; + public TimeValue getPollTimeout() { + return pollTimeout; } - public void setIdleShardRetryDelay(TimeValue idleShardRetryDelay) { - this.idleShardRetryDelay = idleShardRetryDelay; + public void setPollTimeout(TimeValue pollTimeout) { + this.pollTimeout = pollTimeout; } @Override @@ -209,7 +209,7 @@ public void readFrom(StreamInput in) throws IOException { maxConcurrentWriteBatches = in.readOptionalVInt(); maxWriteBufferSize = in.readOptionalVInt(); maxRetryDelay = in.readOptionalTimeValue(); - idleShardRetryDelay = in.readOptionalTimeValue(); + pollTimeout = in.readOptionalTimeValue(); } @Override @@ -224,7 +224,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(maxConcurrentWriteBatches); out.writeOptionalVInt(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(idleShardRetryDelay); + out.writeOptionalTimeValue(pollTimeout); } @Override @@ -254,8 +254,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (maxRetryDelay != null) { builder.field(AutoFollowPattern.MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); } - if (idleShardRetryDelay != null) { - builder.field(AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + if (pollTimeout != null) { + builder.field(AutoFollowPattern.POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); } } builder.endObject(); @@ -276,7 +276,7 @@ public boolean equals(Object o) { 
Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && Objects.equals(maxRetryDelay, request.maxRetryDelay) && - Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay); + Objects.equals(pollTimeout, request.pollTimeout); } @Override @@ -291,7 +291,7 @@ public int hashCode() { maxConcurrentWriteBatches, maxWriteBufferSize, maxRetryDelay, - idleShardRetryDelay + pollTimeout ); } } From d78966c4c73df184484287ce303ad0c3976f8494 Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Wed, 19 Sep 2018 04:49:54 -0700 Subject: [PATCH 46/46] Fixing assertions in integration test (#33833) --- .../xpack/monitoring/integration/MonitoringIT.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index a6f9a14f28b63..f392b1eedc649 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -422,9 +422,6 @@ private void assertIndexStatsMonitoringDoc(final Map document) { assertThat((String) indexStats.get("uuid"), not(isEmptyOrNullString())); assertThat(indexStats.get("created"), notNullValue()); assertThat((String) indexStats.get("status"), not(isEmptyOrNullString())); - assertThat(indexStats.get("version"), notNullValue()); - final Map version = (Map) indexStats.get("version"); - assertEquals(2, version.size()); assertThat(indexStats.get("shards"), notNullValue()); final Map shards = (Map) indexStats.get("shards"); assertEquals(11, shards.size());