From 8fdeaefaa5a3d5b84f67f0326f126a049040f7a5 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 27 Feb 2019 15:33:50 -0800 Subject: [PATCH 01/39] [DOCS] Removes problematic footer from Watcher docs (#39474) --- x-pack/docs/en/watcher/transform/script.asciidoc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/x-pack/docs/en/watcher/transform/script.asciidoc b/x-pack/docs/en/watcher/transform/script.asciidoc index f1a46d482d9e6..9a1377eb5eab7 100644 --- a/x-pack/docs/en/watcher/transform/script.asciidoc +++ b/x-pack/docs/en/watcher/transform/script.asciidoc @@ -39,25 +39,24 @@ The following table lists the possible settings that can be configured: [[transform-script-settings]] .Script Transform Settings -[options="header,footer"] +[options="header"] |====== | Name |Required | Default | Description -| `inline` | yes* | - | When using an inline script, this field holds +| `inline` | yes | - | When using an inline script, this field holds the script itself. -| `id` | yes* | - | When referring to a stored script, this +| `id` | yes | - | When referring to a stored script, this field holds the id of the script. | `lang` | no | `painless` | The script language | `params` | no | - | Additional parameters/variables that are accessible by the script - |====== When using the object notation of the script, one (and only one) of `inline`, -or `id` fields must be defined +or `id` fields must be defined. NOTE: In addition to the provided `params`, the scripts also have access to the <>. From d0ba29c0c133493f6624e0b461dd4572cac4c845 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 28 Feb 2019 06:14:34 +0100 Subject: [PATCH 02/39] Correct name of basic_date_time_no_millis (#39367) (#39457) With this commit we correct the name of the Java time based formatter for `basic_date_time_no_millis`. This fixes a copy&paste error where we have assigned the name `basic_t_time_no_millis` to two different formatters. --- .../main/java/org/elasticsearch/common/time/DateFormatters.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index f29c3bf616fd7..a6e115ca70465 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -243,7 +243,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic date and time without millis, * separated by a 'T' (yyyyMMdd'T'HHmmssZ). 
*/ - private static final DateFormatter BASIC_DATE_TIME_NO_MILLIS = new JavaDateFormatter("basic_t_time_no_millis", + private static final DateFormatter BASIC_DATE_TIME_NO_MILLIS = new JavaDateFormatter("basic_date_time_no_millis", new DateTimeFormatterBuilder().append(BASIC_DATE_T).append(BASIC_TIME_NO_MILLIS_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_DATE_T).append(BASIC_TIME_NO_MILLIS_BASE) From 2598ca19c19d23dc45c7071b0bc02e2b0ff3ce37 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 27 Feb 2019 16:15:21 +0200 Subject: [PATCH 03/39] Mute Bulk indexing of monitoring data (#39448) Relates: #30101 --- .../resources/rest-api-spec/test/monitoring/bulk/10_basic.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml index 4a96e9ffdd83e..6127e559e3247 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml @@ -1,6 +1,10 @@ --- "Bulk indexing of monitoring data": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/30101" + - do: xpack.monitoring.bulk: system_id: "kibana" From 1c709af221c8b73c45f7c7992de3b964066135e2 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Thu, 28 Feb 2019 11:08:27 +0200 Subject: [PATCH 04/39] [ML] Add integration test for interim results after advancing bucket (#39447) This is an integration test that captures the issue described in elastic/ml-cpp#324 --- ...imResultsIT.java => InterimResultsIT.java} | 57 ++++++++++++++++--- 1 file changed, 49 insertions(+), 8 deletions(-) rename x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/{UpdateInterimResultsIT.java => InterimResultsIT.java} (67%) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsIT.java similarity index 67% rename from x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsIT.java index 4cbeaf1dc482c..5689e7bfe5421 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsIT.java @@ -5,13 +5,16 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import 
org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.junit.After; @@ -24,28 +27,25 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -/** - * Tests that interim results get updated correctly - */ -public class UpdateInterimResultsIT extends MlNativeAutodetectIntegTestCase { +public class InterimResultsIT extends MlNativeAutodetectIntegTestCase { - private static final String JOB_ID = "update-interim-test"; private static final long BUCKET_SPAN_SECONDS = 1000; private long time; @After - public void cleanUpTest() throws Exception { + public void cleanUpTest() { cleanUp(); } - public void test() throws Exception { + public void testInterimResultsUpdates() throws Exception { + String jobId = "test-interim-results-updates"; AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( Collections.singletonList(new Detector.Builder("max", "value").build())); analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); DataDescription.Builder dataDescription = new DataDescription.Builder(); dataDescription.setTimeFormat("epoch"); - Job.Builder job = new Job.Builder(JOB_ID); + Job.Builder job = new Job.Builder(jobId); job.setAnalysisConfig(analysisConfig); job.setDataDescription(dataDescription); @@ -106,6 +106,47 @@ public void test() throws Exception { assertThat(bucket.get(0).getRecords().get(0).getActual().get(0), equalTo(16.0)); } + public void testNoInterimResultsAfterAdvancingBucket() throws Exception { + String jobId = "test-no-inerim-results-after-advancing-bucket"; + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Collections.singletonList(new Detector.Builder("count", null).build())); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + time = 1400000000; + + // push some data, flush job, verify no interim results + assertThat(postData(job.getId(), createData(50)).getProcessedRecordCount(), equalTo(50L)); + FlushJobAction.Response flushResponse = flushJob(job.getId(), false); + assertThat(getInterimResults(job.getId()).isEmpty(), is(true)); + + // advance time and request interim results + long lastFinalizedBucketEnd = flushResponse.getLastFinalizedBucketEnd().getTime(); + FlushJobAction.Request advanceTimeRequest = new FlushJobAction.Request(jobId); + advanceTimeRequest.setAdvanceTime(String.valueOf(lastFinalizedBucketEnd + BUCKET_SPAN_SECONDS * 1000)); + advanceTimeRequest.setCalcInterim(true); + assertThat(client().execute(FlushJobAction.INSTANCE, advanceTimeRequest).actionGet().isFlushed(), is(true)); + + List interimResults = getInterimResults(job.getId()); + assertThat(interimResults.size(), equalTo(1)); + + // We expect there are no records. The bucket count is low but at the same time + // it is too early into the bucket to consider it an anomaly. Let's verify that. 
+ List records = interimResults.get(0).getRecords(); + List recordsJson = records.stream().map(Strings::toString).collect(Collectors.toList()); + assertThat("Found interim records: " + recordsJson, records.isEmpty(), is(true)); + + closeJob(jobId); + } + private String createData(int halfBuckets) { StringBuilder data = new StringBuilder(); for (int i = 0; i < halfBuckets; i++) { From f62ea67e60642b477814e45c2b56010a57f4eb73 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Thu, 28 Feb 2019 14:40:16 +0200 Subject: [PATCH 05/39] Integ test snapshot and restore for native realm (#39123) This commit adds a simple integ test that exercises the flow: * snapshot .security * delete .security * restore .security , checking that the Native Realm works as expected. Relates #34454 --- .../authc/esnative/NativeRealmIntegTests.java | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 7b131ff5d20b6..948bd2776eaac 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -8,6 +8,8 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; @@ -17,7 +19,10 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; @@ -46,6 +51,7 @@ import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; import org.junit.BeforeClass; @@ -58,12 +64,14 @@ import java.util.concurrent.CountDownLatch; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_SECURITY_INDEX; import static org.hamcrest.Matchers.arrayContaining; import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -378,6 +386,69 @@ public void testCreateAndUpdateRole() { } } + public void testSnapshotDeleteRestore() { + logger.error("--> creating role"); + securityClient().preparePutRole("test_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"create_index"}, null, null, null, true) + .get(); + logger.error("--> creating user"); + securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role", "snapshot_user").get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> creating repository"); + assertAcked(client().admin().cluster() + .preparePutRepository("test-repo") + .setType("fs").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + // joe can snapshot all indices, including '.security' + SnapshotInfo snapshotInfo = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true) + .setIncludeGlobalState(false) + .setIndices(SECURITY_INDEX_NAME) + .get().getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.indices(), contains(SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + deleteSecurityIndex(); + // the realm cache should clear itself but we don't wish to race it + securityClient().prepareClearRealmCache().get(); + // authn fails + final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap("Authorization", token)).admin().indices().prepareCreate("idx").get()); + assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); + // users and roles are missing + GetUsersResponse getUsersResponse = securityClient().prepareGetUsers("joe").get(); + assertThat(getUsersResponse.users().length, is(0)); + GetRolesResponse getRolesResponse = securityClient().prepareGetRoles("test_role").get(); + assertThat(getRolesResponse.roles().length, is(0)); + // restore + RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true).setIncludeAliases(true).get(); + assertThat(response.status(), equalTo(RestStatus.OK)); + assertThat(response.getRestoreInfo().indices(), contains(SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + // the realm cache should clear itself but we don't wish to race it + securityClient().prepareClearRealmCache().get(); + // users and roles are retrievable + getUsersResponse = securityClient().prepareGetUsers("joe").get(); + assertThat(getUsersResponse.users().length, is(1)); + assertThat(Arrays.asList(getUsersResponse.users()[0].roles()), contains("test_role", "snapshot_user")); + getRolesResponse = securityClient().prepareGetRoles("test_role").get(); + assertThat(getRolesResponse.roles().length, is(1)); + assertThat(Arrays.asList(getRolesResponse.roles()[0].getClusterPrivileges()), contains("all")); + assertThat(getRolesResponse.roles()[0].getIndicesPrivileges().length, is(1)); + 
assertThat(Arrays.asList(getRolesResponse.roles()[0].getIndicesPrivileges()[0].getPrivileges()), contains("create_index")); + assertThat(Arrays.asList(getRolesResponse.roles()[0].getIndicesPrivileges()[0].getIndices()), contains("*")); + // joe can create indices + CreateIndexResponse createIndexResponse = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin() + .indices().prepareCreate("idx").get(); + assertThat(createIndexResponse.isAcknowledged(), is (true)); + assertAcked(client().admin().cluster().prepareDeleteRepository("test-repo")); + } + public void testAuthenticateWithDeletedRole() { SecurityClient c = securityClient(); logger.error("--> creating role"); From 229f4c514d3ea3edeb6899206ebf542ab31bd55b Mon Sep 17 00:00:00 2001 From: Shajahan Palayil Date: Thu, 28 Feb 2019 19:59:15 +0100 Subject: [PATCH 06/39] [DOCS] Corrected API path for /_security/api_key (#39519) --- x-pack/docs/en/rest-api/security/get-api-keys.asciidoc | 6 +++--- .../docs/en/rest-api/security/invalidate-api-keys.asciidoc | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc index ab2ef770cb124..9e93186b7da60 100644 --- a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc @@ -59,7 +59,7 @@ The following example retrieves all API keys for the `native1` realm: [source,js] -------------------------------------------------- -GET /_xpack/api_key?realm_name=native1 +GET /_security/api_key?realm_name=native1 -------------------------------------------------- // NOTCONSOLE @@ -67,7 +67,7 @@ The following example retrieves all API keys for the user `myuser` in all realms [source,js] -------------------------------------------------- -GET /_xpack/api_key?username=myuser +GET /_security/api_key?username=myuser -------------------------------------------------- // NOTCONSOLE @@ -76,7 +76,7 @@ Finally, the following example retrieves all API keys for the user `myuser` in [source,js] -------------------------------------------------- -GET /_xpack/api_key?username=myuser&realm_name=native1 +GET /_security/api_key?username=myuser&realm_name=native1 -------------------------------------------------- // NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc index 4809e267ebd80..d1ca6f77500a6 100644 --- a/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc @@ -65,7 +65,7 @@ The following example invalidates all API keys for the `native1` realm immediate [source,js] -------------------------------------------------- -DELETE /_xpack/api_key +DELETE /_security/api_key { "realm_name" : "native1" } @@ -76,7 +76,7 @@ The following example invalidates all API keys for the user `myuser` in all real [source,js] -------------------------------------------------- -DELETE /_xpack/api_key +DELETE /_security/api_key { "username" : "myuser" } @@ -88,7 +88,7 @@ Finally, the following example invalidates all API keys for the user `myuser` in [source,js] -------------------------------------------------- -DELETE /_xpack/api_key +DELETE /_security/api_key { "username" : "myuser", "realm_name" : "native1" From 4b725e038f9d05d862c8dc13860656d9a816d04a Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 1 Mar 2019 10:11:43 +0100 Subject: [PATCH 07/39] Snapshot 
Stability Fixes (#39502) * Snapshot Stability Fixes * Backport of various snapshot stability fixes from `master` to `6.7` * Includes #38368, #38025 and #37612 --- .../cluster/SnapshotsInProgress.java | 36 +- .../snapshots/SnapshotException.java | 4 - .../snapshots/SnapshotShardsService.java | 378 +++++++------- .../snapshots/SnapshotsService.java | 467 +++++++++--------- 4 files changed, 444 insertions(+), 441 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 7308d471afb9d..73be2ea006656 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState.Custom; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -93,9 +94,11 @@ public static class Entry { private final ImmutableOpenMap> waitingIndices; private final long startTime; private final long repositoryStateId; + @Nullable private final String failure; public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, - long startTime, long repositoryStateId, ImmutableOpenMap shards) { + long startTime, long repositoryStateId, ImmutableOpenMap shards, + String failure) { this.state = state; this.snapshot = snapshot; this.includeGlobalState = includeGlobalState; @@ -110,15 +113,26 @@ public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, Sta this.waitingIndices = findWaitingIndices(shards); } this.repositoryStateId = repositoryStateId; + this.failure = failure; + } + + public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, + long startTime, long repositoryStateId, ImmutableOpenMap shards) { + this(snapshot, includeGlobalState, partial, state, indices, startTime, repositoryStateId, shards, null); } public Entry(Entry entry, State state, ImmutableOpenMap shards) { this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, - entry.repositoryStateId, shards); + entry.repositoryStateId, shards, entry.failure); + } + + public Entry(Entry entry, State state, ImmutableOpenMap shards, String failure) { + this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, + entry.repositoryStateId, shards, failure); } public Entry(Entry entry, ImmutableOpenMap shards) { - this(entry, entry.state, shards); + this(entry, entry.state, shards, entry.failure); } public Snapshot snapshot() { @@ -157,6 +171,10 @@ public long getRepositoryStateId() { return repositoryStateId; } + public String failure() { + return failure; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -437,6 +455,12 @@ public SnapshotsInProgress(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { repositoryStateId = in.readLong(); } + final String failure; + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { + failure = in.readOptionalString(); + } else { + failure = null; + } entries[i] = new Entry(snapshot, includeGlobalState, partial, @@ -444,7 +468,8 @@ public 
SnapshotsInProgress(StreamInput in) throws IOException { Collections.unmodifiableList(indexBuilder), startTime, repositoryStateId, - builder.build()); + builder.build(), + failure); } this.entries = Arrays.asList(entries); } @@ -476,6 +501,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { out.writeLong(entry.repositoryStateId); } + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { + out.writeOptionalString(entry.failure); + } } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java index d389ed634f3af..05db85d6f7211 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java @@ -51,10 +51,6 @@ public SnapshotException(final Snapshot snapshot, final String msg, final Throwa } } - public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg) { - this(repositoryName, snapshotId, msg, null); - } - public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg, final Throwable cause) { super("[" + repositoryName + ":" + snapshotId + "] " + msg, cause); this.repositoryName = repositoryName; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 3f1cf1db32807..116a3f45b0087 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -69,26 +69,26 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestDeduplicator; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; -import static java.util.Collections.unmodifiableMap; +import static java.util.Collections.unmodifiableList; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE_SAME; @@ -114,11 +114,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final ThreadPool threadPool; - private final Lock shutdownLock = new ReentrantLock(); + private final Map> shardSnapshots = new HashMap<>(); - private final Condition shutdownCondition = shutdownLock.newCondition(); - - private volatile Map> shardSnapshots = emptyMap(); + // A map of snapshots to the shardIds that we already reported to the master as failed + private 
final TransportRequestDeduplicator remoteFailedRequestDeduplicator = + new TransportRequestDeduplicator<>(); private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; @@ -139,7 +139,7 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S } // The constructor of UpdateSnapshotStatusAction will register itself to the TransportService. - this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction(settings, UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction( transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver); if (DiscoveryNode.isMasterNode(settings)) { @@ -147,7 +147,6 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S transportService.registerRequestHandler(UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, UpdateSnapshotStatusRequestV6::new, ThreadPool.Names.SAME, new UpdateSnapshotStateRequestHandlerV6()); } - } @Override @@ -161,16 +160,6 @@ protected void doStart() { @Override protected void doStop() { - shutdownLock.lock(); - try { - while(!shardSnapshots.isEmpty() && shutdownCondition.await(5, TimeUnit.SECONDS)) { - // Wait for at most 5 second for locally running snapshots to finish - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } finally { - shutdownLock.unlock(); - } } @Override @@ -185,7 +174,9 @@ public void clusterChanged(ClusterChangedEvent event) { SnapshotsInProgress currentSnapshots = event.state().custom(SnapshotsInProgress.TYPE); if ((previousSnapshots == null && currentSnapshots != null) || (previousSnapshots != null && previousSnapshots.equals(currentSnapshots) == false)) { - processIndexShardSnapshots(event); + synchronized (shardSnapshots) { + processIndexShardSnapshots(currentSnapshots, event.state().nodes().getMasterNode()); + } } String previousMasterNodeId = event.previousState().nodes().getMasterNodeId(); @@ -202,13 +193,14 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { // abort any snapshots occurring on the soon-to-be closed shard - Map> snapshotShardsMap = shardSnapshots; - for (Map.Entry> snapshotShards : snapshotShardsMap.entrySet()) { - Map shards = snapshotShards.getValue(); - if (shards.containsKey(shardId)) { - logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", - shardId, snapshotShards.getKey().getSnapshotId()); - shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); + synchronized (shardSnapshots) { + for (Map.Entry> snapshotShards : shardSnapshots.entrySet()) { + Map shards = snapshotShards.getValue(); + if (shards.containsKey(shardId)) { + logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", + shardId, snapshotShards.getKey().getSnapshotId()); + shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); + } } } } @@ -223,163 +215,146 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @return map of shard id to snapshot status */ public Map currentSnapshotShards(Snapshot snapshot) { - return shardSnapshots.get(snapshot); + synchronized (shardSnapshots) { + final Map current = shardSnapshots.get(snapshot); + return current == null ? 
null : new HashMap<>(current); + } } /** * Checks if any new shards should be snapshotted on this node * - * @param event cluster state changed event + * @param snapshotsInProgress Current snapshots in progress in cluster state */ - private void processIndexShardSnapshots(ClusterChangedEvent event) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - Map> survivors = new HashMap<>(); + private void processIndexShardSnapshots(SnapshotsInProgress snapshotsInProgress, DiscoveryNode masterNode) { + cancelRemoved(snapshotsInProgress); + if (snapshotsInProgress != null) { + startNewSnapshots(snapshotsInProgress, masterNode); + } + } + + private void cancelRemoved(@Nullable SnapshotsInProgress snapshotsInProgress) { // First, remove snapshots that are no longer there - for (Map.Entry> entry : shardSnapshots.entrySet()) { + Iterator>> it = shardSnapshots.entrySet().iterator(); + while (it.hasNext()) { + final Map.Entry> entry = it.next(); final Snapshot snapshot = entry.getKey(); - if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) { - survivors.put(entry.getKey(), entry.getValue()); - } else { + if (snapshotsInProgress == null || snapshotsInProgress.snapshot(snapshot) == null) { // abort any running snapshots of shards for the removed entry; // this could happen if for some reason the cluster state update for aborting // running shards is missed, then the snapshot is removed is a subsequent cluster // state update, which is being processed here + it.remove(); for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().values()) { snapshotStatus.abortIfNotCompleted("snapshot has been removed in cluster state, aborting"); } } } + } + private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress, DiscoveryNode masterNode) { // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running // snapshots in the future - Map> newSnapshots = new HashMap<>(); // Now go through all snapshots and update existing or create missing - final String localNodeId = event.state().nodes().getLocalNodeId(); - final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); - final Map> snapshotIndices = new HashMap<>(); - if (snapshotsInProgress != null) { - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - snapshotIndices.put(entry.snapshot(), - entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity()))); - if (entry.state() == State.STARTED) { - Map startedShards = new HashMap<>(); - Map snapshotShards = shardSnapshots.get(entry.snapshot()); - for (ObjectObjectCursor shard : entry.shards()) { - // Add all new shards to start processing on - if (localNodeId.equals(shard.value.nodeId())) { - if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.containsKey(shard.key))) { - logger.trace("[{}] - Adding shard to the queue", shard.key); - startedShards.put(shard.key, IndexShardSnapshotStatus.newInitializing()); - } + final String localNodeId = clusterService.localNode().getId(); + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + final State entryState = entry.state(); + if (entryState == State.STARTED) { + Map startedShards = null; + final Snapshot snapshot = entry.snapshot(); + Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); + for (ObjectObjectCursor shard : entry.shards()) { + // Add all new shards to start processing on + final ShardId shardId 
= shard.key; + final ShardSnapshotStatus shardSnapshotStatus = shard.value; + if (localNodeId.equals(shardSnapshotStatus.nodeId()) && shardSnapshotStatus.state() == State.INIT + && snapshotShards.containsKey(shardId) == false) { + logger.trace("[{}] - Adding shard to the queue", shardId); + if (startedShards == null) { + startedShards = new HashMap<>(); } + startedShards.put(shardId, IndexShardSnapshotStatus.newInitializing()); } - if (!startedShards.isEmpty()) { - newSnapshots.put(entry.snapshot(), startedShards); - if (snapshotShards != null) { - // We already saw this snapshot but we need to add more started shards - Map shards = new HashMap<>(); - // Put all shards that were already running on this node - shards.putAll(snapshotShards); - // Put all newly started shards - shards.putAll(startedShards); - survivors.put(entry.snapshot(), unmodifiableMap(shards)); - } else { - // Brand new snapshot that we haven't seen before - survivors.put(entry.snapshot(), unmodifiableMap(startedShards)); + } + if (startedShards != null && startedShards.isEmpty() == false) { + shardSnapshots.computeIfAbsent(snapshot, s -> new HashMap<>()).putAll(startedShards); + startNewShards(entry, startedShards, masterNode); + } + } else if (entryState == State.ABORTED) { + // Abort all running shards for this snapshot + final Snapshot snapshot = entry.snapshot(); + Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); + for (ObjectObjectCursor shard : entry.shards()) { + final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); + if (snapshotStatus != null) { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = + snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); + final Stage stage = lastSnapshotStatus.getStage(); + if (stage == Stage.FINALIZE) { + logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + + "letting it finish", snapshot, shard.key); + } else if (stage == Stage.DONE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + + "updating status on the master", snapshot, shard.key); + notifySuccessfulSnapshotShard(snapshot, shard.key, masterNode); + } else if (stage == Stage.FAILURE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + + "updating status on the master", snapshot, shard.key); + notifyFailedSnapshotShard(snapshot, shard.key, lastSnapshotStatus.getFailure(), masterNode); } - } - } else if (entry.state() == State.ABORTED) { - // Abort all running shards for this snapshot - Map snapshotShards = shardSnapshots.get(entry.snapshot()); - if (snapshotShards != null) { - final String failure = "snapshot has been aborted"; - for (ObjectObjectCursor shard : entry.shards()) { - final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); - if (snapshotStatus != null) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure); - final Stage stage = lastSnapshotStatus.getStage(); - if (stage == Stage.FINALIZE) { - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", entry.snapshot(), shard.key); - - } else if (stage == Stage.DONE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", entry.snapshot(), shard.key); - notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId, masterNode); - - } else if (stage == Stage.FAILURE) { - 
logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + - "updating status on the master", entry.snapshot(), shard.key); - final String snapshotFailure = lastSnapshotStatus.getFailure(); - notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, snapshotFailure, masterNode); - } - } + } else { + // due to CS batching we might have missed the INIT state and straight went into ABORTED + // notify master that abort has completed by moving to FAILED + if (shard.value.state() == State.ABORTED) { + notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason(), masterNode); } } } } } + } - // Update the list of snapshots that we saw and tried to started - // If startup of these shards fails later, we don't want to try starting these shards again - shutdownLock.lock(); - try { - shardSnapshots = unmodifiableMap(survivors); - if (shardSnapshots.isEmpty()) { - // Notify all waiting threads that no more snapshots - shutdownCondition.signalAll(); - } - } finally { - shutdownLock.unlock(); - } - - // We have new shards to starts - if (newSnapshots.isEmpty() == false) { - Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - for (final Map.Entry> entry : newSnapshots.entrySet()) { - final Snapshot snapshot = entry.getKey(); - final Map indicesMap = snapshotIndices.get(snapshot); - assert indicesMap != null; - - for (final Map.Entry shardEntry : entry.getValue().entrySet()) { - final ShardId shardId = shardEntry.getKey(); - final IndexId indexId = indicesMap.get(shardId.getIndexName()); - executor.execute(new AbstractRunnable() { + private void startNewShards(SnapshotsInProgress.Entry entry, Map startedShards, + DiscoveryNode masterNode) { + final Snapshot snapshot = entry.snapshot(); + final Map indicesMap = entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + for (final Map.Entry shardEntry : startedShards.entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final IndexId indexId = indicesMap.get(shardId.getIndexName()); + assert indexId != null; + executor.execute(new AbstractRunnable() { - final SetOnce failure = new SetOnce<>(); + private final SetOnce failure = new SetOnce<>(); - @Override - public void doRun() { - final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); - assert indexId != null; - snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); - } + @Override + public void doRun() { + final IndexShard indexShard = + indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); + } - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", - shardId, snapshot), e); - failure.set(e); - } + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + failure.set(e); + } - @Override - public void onRejection(Exception e) { - failure.set(e); - } + @Override + public void onRejection(Exception e) { + failure.set(e); + } - @Override - public void onAfter() { - final Exception exception = failure.get(); - if (exception != null) { - final String failure = ExceptionsHelper.detailedMessage(exception); - notifyFailedSnapshotShard(snapshot, shardId, localNodeId, failure, masterNode); - } else { - 
notifySuccessfulSnapshotShard(snapshot, shardId, localNodeId, masterNode); - } - } - }); + @Override + public void onAfter() { + final Exception exception = failure.get(); + if (exception != null) { + notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(exception), masterNode); + } else { + notifySuccessfulSnapshotShard(snapshot, shardId, masterNode); + } } - } + }); } } @@ -432,8 +407,6 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { if (snapshotsInProgress == null) { return; } - - final String localNodeId = event.state().nodes().getLocalNodeId(); final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { @@ -442,7 +415,6 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { ImmutableOpenMap masterShards = snapshot.shards(); for(Map.Entry localShard : localShards.entrySet()) { ShardId shardId = localShard.getKey(); - IndexShardSnapshotStatus localShardStatus = localShard.getValue(); ShardSnapshotStatus masterShard = masterShards.get(shardId); if (masterShard != null && masterShard.state().completed() == false) { final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); @@ -452,14 +424,13 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId); - notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localNodeId, masterNode); + notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, masterNode); } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId); - final String failure = indexShardSnapshotStatus.getFailure(); - notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, failure, masterNode); + notifyFailedSnapshotShard(snapshot.snapshot(), shardId, indexShardSnapshotStatus.getFailure(), masterNode); } } } @@ -528,34 +499,64 @@ public String toString() { } /** Notify the master node that the given shard has been successfully snapshotted **/ - void notifySuccessfulSnapshotShard(final Snapshot snapshot, - final ShardId shardId, - final String localNodeId, - final DiscoveryNode masterNode) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode); + private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, DiscoveryNode masterNode) { + sendSnapshotShardUpdate( + snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS), masterNode); } /** Notify the master node that the given shard failed to be snapshotted **/ - void notifyFailedSnapshotShard(final Snapshot snapshot, - final ShardId shardId, - final String localNodeId, - final String failure, - final DiscoveryNode masterNode) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.FAILED, failure), masterNode); + private void notifyFailedSnapshotShard(Snapshot snapshot, ShardId shardId, String failure, 
DiscoveryNode masterNode) { + sendSnapshotShardUpdate( + snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure), masterNode); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ - void sendSnapshotShardUpdate(final Snapshot snapshot, - final ShardId shardId, - final ShardSnapshotStatus status, - final DiscoveryNode masterNode) { + void sendSnapshotShardUpdate(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status, DiscoveryNode masterNode) { try { if (masterNode.getVersion().onOrAfter(Version.V_6_1_0)) { UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME); } else { - UpdateSnapshotStatusRequestV6 requestV6 = new UpdateSnapshotStatusRequestV6(snapshot, shardId, status); - transportService.sendRequest(masterNode, UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, requestV6, INSTANCE_SAME); + remoteFailedRequestDeduplicator.executeOnce( + new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status), + new ActionListener() { + @Override + public void onResponse(Void aVoid) { + logger.trace("[{}] [{}] updated snapshot state", snapshot, status); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); + } + }, + (req, reqListener) -> transportService.sendRequest( + transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, req, + new TransportResponseHandler() { + @Override + public UpdateIndexShardSnapshotStatusResponse read(StreamInput in) throws IOException { + final UpdateIndexShardSnapshotStatusResponse response = new UpdateIndexShardSnapshotStatusResponse(); + response.readFrom(in); + return response; + } + + @Override + public void handleResponse(UpdateIndexShardSnapshotStatusResponse response) { + reqListener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + reqListener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }) + ); } } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); @@ -588,11 +589,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } - class SnapshotStateExecutor implements ClusterStateTaskExecutor { + private class SnapshotStateExecutor implements ClusterStateTaskExecutor { @Override public ClusterTasksResult - execute(ClusterState currentState, List tasks) throws Exception { + execute(ClusterState currentState, List tasks) { final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots != null) { int changedCount = 0; @@ -622,8 +623,6 @@ class SnapshotStateExecutor implements ClusterStateTaskExecutor 0) { logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - - final SnapshotsInProgress updatedSnapshots = - new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterTasksResult.builder().successes(tasks).build( - ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build()); + return ClusterTasksResult.builder().successes(tasks) + 
.build(ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, + new SnapshotsInProgress(unmodifiableList(entries))).build()); } } return ClusterTasksResult.builder().successes(tasks).build(currentState); @@ -646,13 +643,14 @@ static class UpdateIndexShardSnapshotStatusResponse extends ActionResponse { } - class UpdateSnapshotStatusAction extends - TransportMasterNodeAction { - UpdateSnapshotStatusAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, actionName, transportService, clusterService, threadPool, - actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new); + private class UpdateSnapshotStatusAction + extends TransportMasterNodeAction { + UpdateSnapshotStatusAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super( + settings, SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new + ); } @Override @@ -667,7 +665,7 @@ protected UpdateIndexShardSnapshotStatusResponse newResponse() { @Override protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { innerUpdateSnapshotState(request, listener); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index c7bf91b476c5b..998ab2a38639b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -83,7 +83,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import static java.util.Collections.unmodifiableList; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; @@ -98,9 +100,9 @@ * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, ActionListener)} method kicks in and initializes * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state *
 * <li>Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
- * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method</li>
+ * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots} method</li>
 * <li>Once shard snapshot is created data node updates state of the shard in the cluster state using
- * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus, DiscoveryNode)} method</li>
+ * the {@link SnapshotShardsService#sendSnapshotShardUpdate} method</li>
 * <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
 * as completed</li>
  • After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository, @@ -121,6 +123,12 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final Map>> snapshotCompletionListeners = new ConcurrentHashMap<>(); + // Set of snapshots that are currently being initialized by this node + private final Set initializingSnapshots = Collections.synchronizedSet(new HashSet<>()); + + // Set of snapshots that are currently being ended by this node + private final Set endingSnapshots = Collections.synchronizedSet(new HashSet<>()); + @Inject public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { @@ -207,7 +215,7 @@ public List snapshots(final String repositoryName, } final ArrayList snapshotList = new ArrayList<>(snapshotSet); CollectionUtil.timSort(snapshotList); - return Collections.unmodifiableList(snapshotList); + return unmodifiableList(snapshotList); } /** @@ -223,7 +231,7 @@ public List currentSnapshots(final String repositoryName) { snapshotList.add(inProgressSnapshot(entry)); } CollectionUtil.timSort(snapshotList); - return Collections.unmodifiableList(snapshotList); + return unmodifiableList(snapshotList); } /** @@ -269,7 +277,7 @@ public ClusterState execute(ClusterState currentState) { if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot here to be processed in clusterStateProcessed List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, - request.indicesOptions(), request.indices())); + request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); List snapshotIndices = repositoryData.resolveNewIndices(indices); newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId), @@ -280,6 +288,7 @@ public ClusterState execute(ClusterState currentState) { System.currentTimeMillis(), repositoryData.getGenId(), null); + initializingSnapshots.add(newSnapshot.snapshot()); snapshots = new SnapshotsInProgress(newSnapshot); } else { throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); @@ -290,6 +299,9 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); + if (newSnapshot != null) { + initializingSnapshots.remove(newSnapshot.snapshot()); + } newSnapshot = null; listener.onFailure(e); } @@ -297,7 +309,21 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { if (newSnapshot != null) { - beginSnapshot(newState, newSnapshot, request.partial(), listener); + final Snapshot current = newSnapshot.snapshot(); + assert initializingSnapshots.contains(current); + beginSnapshot(newState, newSnapshot, request.partial(), new ActionListener() { + @Override + public void onResponse(final Snapshot snapshot) { + initializingSnapshots.remove(snapshot); + listener.onResponse(snapshot); + } + + @Override + public void onFailure(final Exception e) { + initializingSnapshots.remove(current); + listener.onFailure(e); + } + }); } } @@ -305,7 +331,6 @@ public void 
clusterStateProcessed(String source, ClusterState oldState, final Cl public TimeValue timeout() { return request.masterNodeTimeout(); } - }); } @@ -368,8 +393,11 @@ private void beginSnapshot(final ClusterState clusterState, boolean snapshotCreated; + boolean hadAbortedInitializations; + @Override protected void doRun() { + assert initializingSnapshots.contains(snapshot.snapshot()); Repository repository = repositoriesService.repository(snapshot.snapshot().getRepository()); MetaData metaData = clusterState.metaData(); @@ -394,9 +422,6 @@ protected void doRun() { } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - SnapshotsInProgress.Entry endSnapshot; - String failure; - @Override public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); @@ -407,9 +432,13 @@ public ClusterState execute(ClusterState currentState) { continue; } - if (entry.state() != State.ABORTED) { - // Replace the snapshot that was just intialized - ImmutableOpenMap shards = + if (entry.state() == State.ABORTED) { + entries.add(entry); + assert entry.shards().isEmpty(); + hadAbortedInitializations = true; + } else { + // Replace the snapshot that was just initialized + ImmutableOpenMap shards = shards(currentState, entry.indices()); if (!partial) { Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, @@ -417,9 +446,6 @@ public ClusterState execute(ClusterState currentState) { Set missing = indicesWithMissingShards.v1(); Set closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { - endSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); - entries.add(endSnapshot); - final StringBuilder failureMessage = new StringBuilder(); if (missing.isEmpty() == false) { failureMessage.append("Indices don't have primary shards "); @@ -432,24 +458,15 @@ public ClusterState execute(ClusterState currentState) { failureMessage.append("Indices are closed "); failureMessage.append(closed); } - failure = failureMessage.toString(); + entries.add(new SnapshotsInProgress.Entry(entry, State.FAILED, shards, failureMessage.toString())); continue; } } - SnapshotsInProgress.Entry updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); - entries.add(updatedSnapshot); - if (completed(shards.values())) { - endSnapshot = updatedSnapshot; - } - } else { - assert entry.state() == State.ABORTED : "expecting snapshot to be aborted during initialization"; - failure = "snapshot was aborted during initialization"; - endSnapshot = entry; - entries.add(endSnapshot); + entries.add(new SnapshotsInProgress.Entry(entry, State.STARTED, shards)); } } return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))) .build(); } @@ -478,12 +495,12 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // should still exist when listener is registered. userCreateSnapshotListener.onResponse(snapshot.snapshot()); - // Now that snapshot completion listener is registered we can end the snapshot if needed - // We should end snapshot only if 1) we didn't accept it for processing (which happens when there - // is nothing to do) and 2) there was a snapshot in metadata that we should end. 
Otherwise we should - // go ahead and continue working on this snapshot rather then end here. - if (endSnapshot != null) { - endSnapshot(endSnapshot, failure); + if (hadAbortedInitializations) { + final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE); + assert snapshotsInProgress != null; + final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); + assert entry != null; + endSnapshot(entry); } } }); @@ -525,7 +542,7 @@ public void onFailure(Exception e) { cleanupAfterError(e); } - public void onNoLongerMaster(String source) { + public void onNoLongerMaster() { userCreateSnapshotListener.onFailure(e); } @@ -552,7 +569,7 @@ private void cleanupAfterError(Exception exception) { } - private SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { + private static SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { return new SnapshotInfo(entry.snapshot().getSnapshotId(), entry.indices().stream().map(IndexId::getName).collect(Collectors.toList()), entry.startTime(), entry.includeGlobalState()); @@ -610,7 +627,7 @@ public List currentSnapshots(final String repository, builder.add(entry); } } - return Collections.unmodifiableList(builder); + return unmodifiableList(builder); } /** @@ -666,7 +683,7 @@ public Map snapshotShards(final String reposi return unmodifiableMap(shardStatus); } - private SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { + private static SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { return shardFailure; @@ -680,14 +697,28 @@ public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { // We don't remove old master when master flips anymore. So, we need to check for change in master - if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) { - processSnapshotsOnRemovedNodes(event); + final SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); + final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; + if (snapshotsInProgress != null) { + if (newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes())) { + processSnapshotsOnRemovedNodes(); + } + if (event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event)) { + processStartedShards(); + } + // Cleanup all snapshots that have no more work left: + // 1. Completed snapshots + // 2. Snapshots in state INIT that the previous master failed to start + // 3. 
Snapshots in any other state that have all their shard tasks completed + snapshotsInProgress.entries().stream().filter( + entry -> entry.state().completed() + || initializingSnapshots.contains(entry.snapshot()) == false + && (entry.state() == State.INIT || completed(entry.shards().values())) + ).forEach(this::endSnapshot); } - if (event.routingTableChanged()) { - processStartedShards(event); + if (newMaster) { + finalizeSnapshotDeletionFromPreviousMaster(event); } - removeFinishedSnapshotFromClusterState(event); - finalizeSnapshotDeletionFromPreviousMaster(event); } } catch (Exception e) { logger.warn("Failed to update snapshot state ", e); @@ -706,166 +737,134 @@ public void applyClusterState(ClusterChangedEvent event) { * snapshot was deleted and a call to GET snapshots would reveal that the snapshot no longer exists. */ private void finalizeSnapshotDeletionFromPreviousMaster(ClusterChangedEvent event) { - if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) { - SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; - SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); - deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); - } + SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; + SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); + deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); } } /** - * Removes a finished snapshot from the cluster state. This can happen if the previous - * master node processed a cluster state update that marked the snapshot as finished, - * but the previous master node died before removing the snapshot in progress from the - * cluster state. It is then the responsibility of the new master node to end the - * snapshot and remove it from the cluster state. 
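The stream filter above relies on && binding tighter than ||. Spelled out with explicit branches, the cleanup rule reads as follows (a sketch reusing the patch's own names; not a drop-in method):

    // Equivalent predicate for "snapshot has no more work left", with explicit grouping.
    private boolean hasNoMoreWork(SnapshotsInProgress.Entry entry) {
        if (entry.state().completed()) {
            return true;  // 1. the snapshot already finished
        }
        if (initializingSnapshots.contains(entry.snapshot())) {
            return false; // this node is still initializing it; leave it alone
        }
        // 2. an INIT entry abandoned by a previous master, or
        // 3. an entry whose shard tasks have all completed
        return entry.state() == State.INIT || completed(entry.shards().values());
    }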
+ * Cleans up shard snapshots that were running on removed nodes */ - private void removeFinishedSnapshotFromClusterState(ClusterChangedEvent event) { - if (event.localNodeMaster() && !event.previousState().nodes().isLocalNodeElectedMaster()) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - if (snapshotsInProgress != null && !snapshotsInProgress.entries().isEmpty()) { - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - if (entry.state().completed()) { - endSnapshot(entry); + private void processSnapshotsOnRemovedNodes() { + clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + DiscoveryNodes nodes = currentState.nodes(); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots == null) { + return currentState; + } + boolean changed = false; + ArrayList entries = new ArrayList<>(); + for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { + SnapshotsInProgress.Entry updatedSnapshot = snapshot; + if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { + ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); + boolean snapshotChanged = false; + for (ObjectObjectCursor shardEntry : snapshot.shards()) { + ShardSnapshotStatus shardStatus = shardEntry.value; + if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { + if (nodes.nodeExists(shardStatus.nodeId())) { + shards.put(shardEntry.key, shardEntry.value); + } else { + // TODO: Restart snapshot on another node? + snapshotChanged = true; + logger.warn("failing snapshot of shard [{}] on closed node [{}]", + shardEntry.key, shardStatus.nodeId()); + shards.put(shardEntry.key, + new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); + } + } + } + if (snapshotChanged) { + changed = true; + ImmutableOpenMap shardsMap = shards.build(); + if (!snapshot.state().completed() && completed(shardsMap.values())) { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shardsMap); + } else { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, snapshot.state(), shardsMap); + } + } + entries.add(updatedSnapshot); + } else if (snapshot.state() == State.INIT && initializingSnapshots.contains(snapshot.snapshot()) == false) { + changed = true; + // Mark the snapshot as aborted as it failed to start from the previous master + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); + entries.add(updatedSnapshot); + + // Clean up the snapshot that failed to start from the old master + deleteSnapshot(snapshot.snapshot(), new ActionListener() { + @Override + public void onResponse(Void aVoid) { + logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); + } + }, updatedSnapshot.getRepositoryStateId(), false); } } + if (changed) { + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); + } + return currentState; } - } + + @Override + public void onFailure(String source, Exception e) { + logger.warn("failed to update snapshot state after node removal"); + } + }); } - /** - * Cleans up shard snapshots that were running on removed 
nodes - * - * @param event cluster changed event - */ - private void processSnapshotsOnRemovedNodes(ClusterChangedEvent event) { - if (removedNodesCleanupNeeded(event)) { - // Check if we just became the master - final boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); - clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - DiscoveryNodes nodes = currentState.nodes(); - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots == null) { - return currentState; - } + private void processStartedShards() { + clusterService.submitStateUpdateTask("update snapshot state after shards started", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + RoutingTable routingTable = currentState.routingTable(); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots != null) { boolean changed = false; ArrayList entries = new ArrayList<>(); for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { SnapshotsInProgress.Entry updatedSnapshot = snapshot; - boolean snapshotChanged = false; - if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { - ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - for (ObjectObjectCursor shardEntry : snapshot.shards()) { - ShardSnapshotStatus shardStatus = shardEntry.value; - if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { - if (nodes.nodeExists(shardStatus.nodeId())) { - shards.put(shardEntry.key, shardEntry.value); - } else { - // TODO: Restart snapshot on another node? 
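For readability, here is the node-removal branch from this hunk in isolation, with each case annotated (a sketch over the patch's own types, mirroring the quoted loop rather than extending it):

    // Fail any incomplete shard snapshot whose assigned node has left the cluster.
    boolean snapshotChanged = false;
    ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableOpenMap.builder();
    for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards()) {
        ShardSnapshotStatus shardStatus = shardEntry.value;
        if (shardStatus.state().completed() == false && shardStatus.nodeId() != null) {
            if (nodes.nodeExists(shardStatus.nodeId())) {
                shards.put(shardEntry.key, shardEntry.value); // node still present: keep as-is
            } else {
                snapshotChanged = true;                       // node gone: mark the shard failed
                shards.put(shardEntry.key,
                    new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
            }
        }
    }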
- snapshotChanged = true; - logger.warn("failing snapshot of shard [{}] on closed node [{}]", - shardEntry.key, shardStatus.nodeId()); - shards.put(shardEntry.key, new ShardSnapshotStatus(shardStatus.nodeId(), - State.FAILED, "node shutdown")); - } - } - } - if (snapshotChanged) { + if (snapshot.state() == State.STARTED) { + ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), + routingTable); + if (shards != null) { changed = true; - ImmutableOpenMap shardsMap = shards.build(); - if (!snapshot.state().completed() && completed(shardsMap.values())) { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shardsMap); - endSnapshot(updatedSnapshot); + if (!snapshot.state().completed() && completed(shards.values())) { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shards); } else { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, snapshot.state(), shardsMap); + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, shards); } } entries.add(updatedSnapshot); - } else if (snapshot.state() == State.INIT && newMaster) { - changed = true; - // Mark the snapshot as aborted as it failed to start from the previous master - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); - entries.add(updatedSnapshot); - - // Clean up the snapshot that failed to start from the old master - deleteSnapshot(snapshot.snapshot(), new ActionListener() { - @Override - public void onResponse(Void aVoid) { - logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); - } - - @Override - public void onFailure(Exception e) { - logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); - } - }, updatedSnapshot.getRepositoryStateId(), false); } } if (changed) { - snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } - return currentState; - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn("failed to update snapshot state after node removal"); - } - }); - } - } - - private void processStartedShards(ClusterChangedEvent event) { - if (waitingShardsStartedOrUnassigned(event)) { - clusterService.submitStateUpdateTask("update snapshot state after shards started", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - RoutingTable routingTable = currentState.routingTable(); - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots != null) { - boolean changed = false; - ArrayList entries = new ArrayList<>(); - for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { - SnapshotsInProgress.Entry updatedSnapshot = snapshot; - if (snapshot.state() == State.STARTED) { - ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), - routingTable); - if (shards != null) { - changed = true; - if (!snapshot.state().completed() && completed(shards.values())) { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shards); - endSnapshot(updatedSnapshot); - } else { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, shards); - } - } - entries.add(updatedSnapshot); - } - } - if (changed) { - snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return 
ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); } - return currentState; } + return currentState; + } - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> - new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); - } - }); - } + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> + new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + } + }); } - private ImmutableOpenMap processWaitingShards( + private static ImmutableOpenMap processWaitingShards( ImmutableOpenMap snapshotShards, RoutingTable routingTable) { boolean snapshotChanged = false; ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); @@ -905,19 +904,16 @@ private ImmutableOpenMap processWaitingShards( } } - private boolean waitingShardsStartedOrUnassigned(ClusterChangedEvent event) { - SnapshotsInProgress curr = event.state().custom(SnapshotsInProgress.TYPE); - if (curr != null) { - for (SnapshotsInProgress.Entry entry : curr.entries()) { - if (entry.state() == State.STARTED && !entry.waitingIndices().isEmpty()) { - for (ObjectCursor index : entry.waitingIndices().keys()) { - if (event.indexRoutingTableChanged(index.value)) { - IndexRoutingTable indexShardRoutingTable = event.state().getRoutingTable().index(index.value); - for (ShardId shardId : entry.waitingIndices().get(index.value)) { - ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.id()).primaryShard(); - if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { - return true; - } + private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snapshotsInProgress, ClusterChangedEvent event) { + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + if (entry.state() == State.STARTED) { + for (ObjectCursor index : entry.waitingIndices().keys()) { + if (event.indexRoutingTableChanged(index.value)) { + IndexRoutingTable indexShardRoutingTable = event.state().getRoutingTable().index(index.value); + for (ShardId shardId : entry.waitingIndices().get(index.value)) { + ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.id()).primaryShard(); + if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { + return true; } } } @@ -927,28 +923,12 @@ private boolean waitingShardsStartedOrUnassigned(ClusterChangedEvent event) { return false; } - private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - if (snapshotsInProgress == null) { - return false; - } - // Check if we just became the master - boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); - for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { - if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) { - // We just replaced old master and snapshots in intermediate states needs to be cleaned - return true; - } - for (DiscoveryNode node : event.nodesDelta().removedNodes()) { - for (ObjectCursor shardStatus : snapshot.shards().values()) { - if (!shardStatus.value.state().completed() && node.getId().equals(shardStatus.value.nodeId())) { - // At least one shard was running on the 
removed node - we need to fail it - return true; - } - } - } - } - return false; + private static boolean removedNodesCleanupNeeded(SnapshotsInProgress snapshotsInProgress, List removedNodes) { + // If at least one shard was running on a removed node - we need to fail it + return removedNodes.isEmpty() == false && snapshotsInProgress.entries().stream().flatMap(snapshot -> + StreamSupport.stream(((Iterable) () -> snapshot.shards().valuesIt()).spliterator(), false) + .filter(s -> s.state().completed() == false).map(ShardSnapshotStatus::nodeId)) + .anyMatch(removedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())::contains); } /** @@ -981,25 +961,16 @@ private Tuple, Set> indicesWithMissingShards( * * @param entry snapshot */ - void endSnapshot(final SnapshotsInProgress.Entry entry) { - endSnapshot(entry, null); - } - - - /** - * Finalizes the shard in repository and then removes it from cluster state - *
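The single stream expression in removedNodesCleanupNeeded above is dense. An equivalent imperative sketch (same names, generics restored by hand) makes the short-circuits explicit:

    private static boolean removedNodesCleanupNeeded(SnapshotsInProgress snapshotsInProgress,
                                                     List<DiscoveryNode> removedNodes) {
        if (removedNodes.isEmpty()) {
            return false; // no nodes left the cluster, so no shard can need failing
        }
        final Set<String> removedNodeIds =
            removedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet());
        for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) {
            for (ObjectCursor<ShardSnapshotStatus> shardStatus : snapshot.shards().values()) {
                // an incomplete shard snapshot assigned to a departed node must be failed
                if (shardStatus.value.state().completed() == false
                        && removedNodeIds.contains(shardStatus.value.nodeId())) {
                    return true;
                }
            }
        }
        return false;
    }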

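The replacement endSnapshot in the hunk just below leans on the add-as-test idiom: Set.add reports whether the element was new, so concurrent callers finalize a given snapshot at most once. A minimal sketch using the patch's names:

    private final Set<Snapshot> endingSnapshots = Collections.synchronizedSet(new HashSet<>());

    private void endSnapshot(SnapshotsInProgress.Entry entry) {
        if (endingSnapshots.add(entry.snapshot()) == false) {
            return; // already being ended; a second call is a no-op
        }
        // ... finalize in the repository on the SNAPSHOT pool; every completion
        // and failure path must then call endingSnapshots.remove(entry.snapshot())
    }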
    - * This is non-blocking method that runs on a thread from SNAPSHOT thread pool - * - * @param entry snapshot - * @param failure failure reason or null if snapshot was successful - */ - private void endSnapshot(final SnapshotsInProgress.Entry entry, final String failure) { + private void endSnapshot(final SnapshotsInProgress.Entry entry) { + if (endingSnapshots.add(entry.snapshot()) == false) { + return; + } threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { @Override protected void doRun() { final Snapshot snapshot = entry.snapshot(); final Repository repository = repositoriesService.repository(snapshot.getRepository()); + final String failure = entry.failure(); logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), failure); ArrayList shardFailures = new ArrayList<>(); for (ObjectObjectCursor shardStatus : entry.shards()) { @@ -1015,7 +986,7 @@ protected void doRun() { entry.startTime(), failure, entry.shards().size(), - Collections.unmodifiableList(shardFailures), + unmodifiableList(shardFailures), entry.getRepositoryStateId(), entry.includeGlobalState()); removeSnapshotFromClusterState(snapshot, snapshotInfo, null); @@ -1033,7 +1004,7 @@ public void onFailure(final Exception e) { /** * Removes record of running snapshot from cluster state - * @param snapshot snapshot + * @param snapshot snapshot * @param snapshotInfo snapshot info if snapshot was successful * @param e exception if snapshot failed */ @@ -1043,11 +1014,11 @@ private void removeSnapshotFromClusterState(final Snapshot snapshot, final Snaps /** * Removes record of running snapshot from cluster state and notifies the listener when this action is complete - * @param snapshot snapshot + * @param snapshot snapshot * @param failure exception if snapshot failed * @param listener listener to notify when snapshot information is removed from the cluster state */ - private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure, + private void removeSnapshotFromClusterState(final Snapshot snapshot, @Nullable SnapshotInfo snapshotInfo, final Exception failure, @Nullable CleanupAfterErrorListener listener) { clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() { @@ -1065,8 +1036,8 @@ public ClusterState execute(ClusterState currentState) { } } if (changed) { - snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); } } return currentState; @@ -1075,6 +1046,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); + endingSnapshots.remove(snapshot); if (listener != null) { listener.onFailure(e); } @@ -1082,8 +1054,9 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { + endingSnapshots.remove(snapshot); if (listener != null) { - listener.onNoLongerMaster(source); + listener.onNoLongerMaster(); } } @@ -1101,6 +1074,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS logger.warn("Failed to notify listeners", e); } } + 
endingSnapshots.remove(snapshot); if (listener != null) { listener.onResponse(snapshotInfo); } @@ -1131,14 +1105,20 @@ public void deleteSnapshot(final String repositoryName, final String snapshotNam .filter(s -> s.getName().equals(snapshotName)) .findFirst(); // if nothing found by the same name, then look in the cluster state for current in progress snapshots + long repoGenId = repositoryData.getGenId(); if (matchedEntry.isPresent() == false) { - matchedEntry = currentSnapshots(repositoryName, Collections.emptyList()).stream() - .map(e -> e.snapshot().getSnapshotId()).filter(s -> s.getName().equals(snapshotName)).findFirst(); + Optional matchedInProgress = currentSnapshots(repositoryName, Collections.emptyList()).stream() + .filter(s -> s.snapshot().getSnapshotId().getName().equals(snapshotName)).findFirst(); + if (matchedInProgress.isPresent()) { + matchedEntry = matchedInProgress.map(s -> s.snapshot().getSnapshotId()); + // Derive repository generation if a snapshot is in progress because it will increment the generation when it finishes + repoGenId = matchedInProgress.get().getRepositoryStateId() + 1L; + } } if (matchedEntry.isPresent() == false) { throw new SnapshotMissingException(repositoryName, snapshotName); } - deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repositoryData.getGenId(), immediatePriority); + deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repoGenId, immediatePriority); } /** @@ -1201,10 +1181,12 @@ public ClusterState execute(ClusterState currentState) throws Exception { final ImmutableOpenMap shards; final State state = snapshotEntry.state(); + final String failure; if (state == State.INIT) { // snapshot is still initializing, mark it as aborted shards = snapshotEntry.shards(); - + assert shards.isEmpty(); + failure = "Snapshot was aborted during initialization"; } else if (state == State.STARTED) { // snapshot is started - mark every non completed shard as aborted final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); @@ -1216,7 +1198,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { shardsBuilder.put(shardEntry.key, status); } shards = shardsBuilder.build(); - + failure = "Snapshot was aborted by deletion"; } else { boolean hasUncompletedShards = false; // Cleanup in case a node gone missing and snapshot wasn't updated for some reason @@ -1237,10 +1219,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { // where we force to finish the snapshot logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); shards = snapshotEntry.shards(); - endSnapshot(snapshotEntry); } + failure = snapshotEntry.failure(); } - SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards); + SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards, failure); clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(newSnapshot)); } return clusterStateBuilder.build(); @@ -1391,7 +1373,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS * @param indices list of indices to be snapshotted * @return list of shard to be included into current snapshot */ - private ImmutableOpenMap shards(ClusterState clusterState, List indices) { + private static ImmutableOpenMap shards(ClusterState clusterState, + List indices) { ImmutableOpenMap.Builder builder = 
ImmutableOpenMap.builder(); MetaData metaData = clusterState.metaData(); for (IndexId index : indices) { @@ -1416,8 +1399,6 @@ private ImmutableOpenMap shard builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated")); } else if (primary.relocating() || primary.initializing()) { - // The WAITING state was introduced in V1.2.0 - - // don't use it if there are nodes with older version in the cluster builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING)); } else if (!primary.started()) { builder.put(shardId, From 1089a0e740e84ebb996366b01397d7bb6e535d96 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 1 Mar 2019 10:16:24 +0100 Subject: [PATCH 08/39] Revert "Snapshot Stability Fixes (#39502)" (#39549) This reverts commit 4b725e038f9d05d862c8dc13860656d9a816d04a. --- .../cluster/SnapshotsInProgress.java | 36 +- .../snapshots/SnapshotException.java | 4 + .../snapshots/SnapshotShardsService.java | 378 +++++++------- .../snapshots/SnapshotsService.java | 467 +++++++++--------- 4 files changed, 441 insertions(+), 444 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 73be2ea006656..7308d471afb9d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState.Custom; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -94,11 +93,9 @@ public static class Entry { private final ImmutableOpenMap> waitingIndices; private final long startTime; private final long repositoryStateId; - @Nullable private final String failure; public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, - long startTime, long repositoryStateId, ImmutableOpenMap shards, - String failure) { + long startTime, long repositoryStateId, ImmutableOpenMap shards) { this.state = state; this.snapshot = snapshot; this.includeGlobalState = includeGlobalState; @@ -113,26 +110,15 @@ public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, Sta this.waitingIndices = findWaitingIndices(shards); } this.repositoryStateId = repositoryStateId; - this.failure = failure; - } - - public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, - long startTime, long repositoryStateId, ImmutableOpenMap shards) { - this(snapshot, includeGlobalState, partial, state, indices, startTime, repositoryStateId, shards, null); } public Entry(Entry entry, State state, ImmutableOpenMap shards) { this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, - entry.repositoryStateId, shards, entry.failure); - } - - public Entry(Entry entry, State state, ImmutableOpenMap shards, String failure) { - this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, - entry.repositoryStateId, shards, failure); + entry.repositoryStateId, shards); } public Entry(Entry entry, ImmutableOpenMap shards) { - this(entry, entry.state, shards, 
entry.failure); + this(entry, entry.state, shards); } public Snapshot snapshot() { @@ -171,10 +157,6 @@ public long getRepositoryStateId() { return repositoryStateId; } - public String failure() { - return failure; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -455,12 +437,6 @@ public SnapshotsInProgress(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { repositoryStateId = in.readLong(); } - final String failure; - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - failure = in.readOptionalString(); - } else { - failure = null; - } entries[i] = new Entry(snapshot, includeGlobalState, partial, @@ -468,8 +444,7 @@ public SnapshotsInProgress(StreamInput in) throws IOException { Collections.unmodifiableList(indexBuilder), startTime, repositoryStateId, - builder.build(), - failure); + builder.build()); } this.entries = Arrays.asList(entries); } @@ -501,9 +476,6 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { out.writeLong(entry.repositoryStateId); } - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeOptionalString(entry.failure); - } } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java index 05db85d6f7211..d389ed634f3af 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java @@ -51,6 +51,10 @@ public SnapshotException(final Snapshot snapshot, final String msg, final Throwa } } + public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg) { + this(repositoryName, snapshotId, msg, null); + } + public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg, final Throwable cause) { super("[" + repositoryName + ":" + snapshotId + "] " + msg, cause); this.repositoryName = repositoryName; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 116a3f45b0087..3f1cf1db32807 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -69,26 +69,26 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestDeduplicator; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; -import static 
java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE_SAME; @@ -114,11 +114,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final ThreadPool threadPool; - private final Map> shardSnapshots = new HashMap<>(); + private final Lock shutdownLock = new ReentrantLock(); - // A map of snapshots to the shardIds that we already reported to the master as failed - private final TransportRequestDeduplicator remoteFailedRequestDeduplicator = - new TransportRequestDeduplicator<>(); + private final Condition shutdownCondition = shutdownLock.newCondition(); + + private volatile Map> shardSnapshots = emptyMap(); private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; @@ -139,7 +139,7 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S } // The constructor of UpdateSnapshotStatusAction will register itself to the TransportService. - this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction( + this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction(settings, UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver); if (DiscoveryNode.isMasterNode(settings)) { @@ -147,6 +147,7 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S transportService.registerRequestHandler(UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, UpdateSnapshotStatusRequestV6::new, ThreadPool.Names.SAME, new UpdateSnapshotStateRequestHandlerV6()); } + } @Override @@ -160,6 +161,16 @@ protected void doStart() { @Override protected void doStop() { + shutdownLock.lock(); + try { + while(!shardSnapshots.isEmpty() && shutdownCondition.await(5, TimeUnit.SECONDS)) { + // Wait for at most 5 second for locally running snapshots to finish + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } finally { + shutdownLock.unlock(); + } } @Override @@ -174,9 +185,7 @@ public void clusterChanged(ClusterChangedEvent event) { SnapshotsInProgress currentSnapshots = event.state().custom(SnapshotsInProgress.TYPE); if ((previousSnapshots == null && currentSnapshots != null) || (previousSnapshots != null && previousSnapshots.equals(currentSnapshots) == false)) { - synchronized (shardSnapshots) { - processIndexShardSnapshots(currentSnapshots, event.state().nodes().getMasterNode()); - } + processIndexShardSnapshots(event); } String previousMasterNodeId = event.previousState().nodes().getMasterNodeId(); @@ -193,14 +202,13 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { // abort any snapshots occurring on the soon-to-be closed shard - synchronized (shardSnapshots) { - for (Map.Entry> snapshotShards : shardSnapshots.entrySet()) { - Map shards = snapshotShards.getValue(); - if (shards.containsKey(shardId)) { - logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", - shardId, snapshotShards.getKey().getSnapshotId()); - shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); - } + Map> snapshotShardsMap = shardSnapshots; + for (Map.Entry> snapshotShards : snapshotShardsMap.entrySet()) { + Map shards = 
snapshotShards.getValue(); + if (shards.containsKey(shardId)) { + logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", + shardId, snapshotShards.getKey().getSnapshotId()); + shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); } } } @@ -215,146 +223,163 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @return map of shard id to snapshot status */ public Map currentSnapshotShards(Snapshot snapshot) { - synchronized (shardSnapshots) { - final Map current = shardSnapshots.get(snapshot); - return current == null ? null : new HashMap<>(current); - } + return shardSnapshots.get(snapshot); } /** * Checks if any new shards should be snapshotted on this node * - * @param snapshotsInProgress Current snapshots in progress in cluster state + * @param event cluster state changed event */ - private void processIndexShardSnapshots(SnapshotsInProgress snapshotsInProgress, DiscoveryNode masterNode) { - cancelRemoved(snapshotsInProgress); - if (snapshotsInProgress != null) { - startNewSnapshots(snapshotsInProgress, masterNode); - } - } - - private void cancelRemoved(@Nullable SnapshotsInProgress snapshotsInProgress) { + private void processIndexShardSnapshots(ClusterChangedEvent event) { + SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); + Map> survivors = new HashMap<>(); // First, remove snapshots that are no longer there - Iterator>> it = shardSnapshots.entrySet().iterator(); - while (it.hasNext()) { - final Map.Entry> entry = it.next(); + for (Map.Entry> entry : shardSnapshots.entrySet()) { final Snapshot snapshot = entry.getKey(); - if (snapshotsInProgress == null || snapshotsInProgress.snapshot(snapshot) == null) { + if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) { + survivors.put(entry.getKey(), entry.getValue()); + } else { // abort any running snapshots of shards for the removed entry; // this could happen if for some reason the cluster state update for aborting // running shards is missed, then the snapshot is removed is a subsequent cluster // state update, which is being processed here - it.remove(); for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().values()) { snapshotStatus.abortIfNotCompleted("snapshot has been removed in cluster state, aborting"); } } } - } - private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress, DiscoveryNode masterNode) { // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running // snapshots in the future + Map> newSnapshots = new HashMap<>(); // Now go through all snapshots and update existing or create missing - final String localNodeId = clusterService.localNode().getId(); - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - final State entryState = entry.state(); - if (entryState == State.STARTED) { - Map startedShards = null; - final Snapshot snapshot = entry.snapshot(); - Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); - for (ObjectObjectCursor shard : entry.shards()) { - // Add all new shards to start processing on - final ShardId shardId = shard.key; - final ShardSnapshotStatus shardSnapshotStatus = shard.value; - if (localNodeId.equals(shardSnapshotStatus.nodeId()) && shardSnapshotStatus.state() == State.INIT - && snapshotShards.containsKey(shardId) == false) { - logger.trace("[{}] - Adding shard to the queue", shardId); - if (startedShards == null) { - startedShards = new 
HashMap<>(); + final String localNodeId = event.state().nodes().getLocalNodeId(); + final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); + final Map> snapshotIndices = new HashMap<>(); + if (snapshotsInProgress != null) { + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + snapshotIndices.put(entry.snapshot(), + entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity()))); + if (entry.state() == State.STARTED) { + Map startedShards = new HashMap<>(); + Map snapshotShards = shardSnapshots.get(entry.snapshot()); + for (ObjectObjectCursor shard : entry.shards()) { + // Add all new shards to start processing on + if (localNodeId.equals(shard.value.nodeId())) { + if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.containsKey(shard.key))) { + logger.trace("[{}] - Adding shard to the queue", shard.key); + startedShards.put(shard.key, IndexShardSnapshotStatus.newInitializing()); + } } - startedShards.put(shardId, IndexShardSnapshotStatus.newInitializing()); } - } - if (startedShards != null && startedShards.isEmpty() == false) { - shardSnapshots.computeIfAbsent(snapshot, s -> new HashMap<>()).putAll(startedShards); - startNewShards(entry, startedShards, masterNode); - } - } else if (entryState == State.ABORTED) { - // Abort all running shards for this snapshot - final Snapshot snapshot = entry.snapshot(); - Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); - for (ObjectObjectCursor shard : entry.shards()) { - final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); - if (snapshotStatus != null) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = - snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); - final Stage stage = lastSnapshotStatus.getStage(); - if (stage == Stage.FINALIZE) { - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", snapshot, shard.key); - } else if (stage == Stage.DONE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", snapshot, shard.key); - notifySuccessfulSnapshotShard(snapshot, shard.key, masterNode); - } else if (stage == Stage.FAILURE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + - "updating status on the master", snapshot, shard.key); - notifyFailedSnapshotShard(snapshot, shard.key, lastSnapshotStatus.getFailure(), masterNode); + if (!startedShards.isEmpty()) { + newSnapshots.put(entry.snapshot(), startedShards); + if (snapshotShards != null) { + // We already saw this snapshot but we need to add more started shards + Map shards = new HashMap<>(); + // Put all shards that were already running on this node + shards.putAll(snapshotShards); + // Put all newly started shards + shards.putAll(startedShards); + survivors.put(entry.snapshot(), unmodifiableMap(shards)); + } else { + // Brand new snapshot that we haven't seen before + survivors.put(entry.snapshot(), unmodifiableMap(startedShards)); } - } else { - // due to CS batching we might have missed the INIT state and straight went into ABORTED - // notify master that abort has completed by moving to FAILED - if (shard.value.state() == State.ABORTED) { - notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason(), masterNode); + } + } else if (entry.state() == State.ABORTED) { + // Abort all running shards for this snapshot + Map snapshotShards = 
shardSnapshots.get(entry.snapshot()); + if (snapshotShards != null) { + final String failure = "snapshot has been aborted"; + for (ObjectObjectCursor shard : entry.shards()) { + final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); + if (snapshotStatus != null) { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure); + final Stage stage = lastSnapshotStatus.getStage(); + if (stage == Stage.FINALIZE) { + logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + + "letting it finish", entry.snapshot(), shard.key); + + } else if (stage == Stage.DONE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + + "updating status on the master", entry.snapshot(), shard.key); + notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId, masterNode); + + } else if (stage == Stage.FAILURE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + + "updating status on the master", entry.snapshot(), shard.key); + final String snapshotFailure = lastSnapshotStatus.getFailure(); + notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, snapshotFailure, masterNode); + } + } } } } } } - } - private void startNewShards(SnapshotsInProgress.Entry entry, Map startedShards, - DiscoveryNode masterNode) { - final Snapshot snapshot = entry.snapshot(); - final Map indicesMap = entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); - final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - for (final Map.Entry shardEntry : startedShards.entrySet()) { - final ShardId shardId = shardEntry.getKey(); - final IndexId indexId = indicesMap.get(shardId.getIndexName()); - assert indexId != null; - executor.execute(new AbstractRunnable() { + // Update the list of snapshots that we saw and tried to started + // If startup of these shards fails later, we don't want to try starting these shards again + shutdownLock.lock(); + try { + shardSnapshots = unmodifiableMap(survivors); + if (shardSnapshots.isEmpty()) { + // Notify all waiting threads that no more snapshots + shutdownCondition.signalAll(); + } + } finally { + shutdownLock.unlock(); + } - private final SetOnce failure = new SetOnce<>(); + // We have new shards to starts + if (newSnapshots.isEmpty() == false) { + Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + for (final Map.Entry> entry : newSnapshots.entrySet()) { + final Snapshot snapshot = entry.getKey(); + final Map indicesMap = snapshotIndices.get(snapshot); + assert indicesMap != null; - @Override - public void doRun() { - final IndexShard indexShard = - indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); - snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); - } + for (final Map.Entry shardEntry : entry.getValue().entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final IndexId indexId = indicesMap.get(shardId.getIndexName()); + executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); - failure.set(e); - } + final SetOnce failure = new SetOnce<>(); - @Override - public void onRejection(Exception e) { - failure.set(e); - } + @Override + public void doRun() { + final IndexShard indexShard = 
indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + assert indexId != null; + snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); + } - @Override - public void onAfter() { - final Exception exception = failure.get(); - if (exception != null) { - notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(exception), masterNode); - } else { - notifySuccessfulSnapshotShard(snapshot, shardId, masterNode); - } + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", + shardId, snapshot), e); + failure.set(e); + } + + @Override + public void onRejection(Exception e) { + failure.set(e); + } + + @Override + public void onAfter() { + final Exception exception = failure.get(); + if (exception != null) { + final String failure = ExceptionsHelper.detailedMessage(exception); + notifyFailedSnapshotShard(snapshot, shardId, localNodeId, failure, masterNode); + } else { + notifySuccessfulSnapshotShard(snapshot, shardId, localNodeId, masterNode); + } + } + }); } - }); + } } } @@ -407,6 +432,8 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { if (snapshotsInProgress == null) { return; } + + final String localNodeId = event.state().nodes().getLocalNodeId(); final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { @@ -415,6 +442,7 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { ImmutableOpenMap masterShards = snapshot.shards(); for(Map.Entry localShard : localShards.entrySet()) { ShardId shardId = localShard.getKey(); + IndexShardSnapshotStatus localShardStatus = localShard.getValue(); ShardSnapshotStatus masterShard = masterShards.get(shardId); if (masterShard != null && masterShard.state().completed() == false) { final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); @@ -424,13 +452,14 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId); - notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, masterNode); + notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localNodeId, masterNode); } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId); - notifyFailedSnapshotShard(snapshot.snapshot(), shardId, indexShardSnapshotStatus.getFailure(), masterNode); + final String failure = indexShardSnapshotStatus.getFailure(); + notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, failure, masterNode); } } } @@ -499,64 +528,34 @@ public String toString() { } /** Notify the master node that the given shard has been successfully snapshotted **/ - private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, DiscoveryNode masterNode) { - sendSnapshotShardUpdate( - snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS), masterNode); + void 
notifySuccessfulSnapshotShard(final Snapshot snapshot, + final ShardId shardId, + final String localNodeId, + final DiscoveryNode masterNode) { + sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode); } /** Notify the master node that the given shard failed to be snapshotted **/ - private void notifyFailedSnapshotShard(Snapshot snapshot, ShardId shardId, String failure, DiscoveryNode masterNode) { - sendSnapshotShardUpdate( - snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure), masterNode); + void notifyFailedSnapshotShard(final Snapshot snapshot, + final ShardId shardId, + final String localNodeId, + final String failure, + final DiscoveryNode masterNode) { + sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.FAILED, failure), masterNode); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ - void sendSnapshotShardUpdate(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status, DiscoveryNode masterNode) { + void sendSnapshotShardUpdate(final Snapshot snapshot, + final ShardId shardId, + final ShardSnapshotStatus status, + final DiscoveryNode masterNode) { try { if (masterNode.getVersion().onOrAfter(Version.V_6_1_0)) { UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME); } else { - remoteFailedRequestDeduplicator.executeOnce( - new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status), - new ActionListener() { - @Override - public void onResponse(Void aVoid) { - logger.trace("[{}] [{}] updated snapshot state", snapshot, status); - } - - @Override - public void onFailure(Exception e) { - logger.warn( - () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); - } - }, - (req, reqListener) -> transportService.sendRequest( - transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, req, - new TransportResponseHandler() { - @Override - public UpdateIndexShardSnapshotStatusResponse read(StreamInput in) throws IOException { - final UpdateIndexShardSnapshotStatusResponse response = new UpdateIndexShardSnapshotStatusResponse(); - response.readFrom(in); - return response; - } - - @Override - public void handleResponse(UpdateIndexShardSnapshotStatusResponse response) { - reqListener.onResponse(null); - } - - @Override - public void handleException(TransportException exp) { - reqListener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }) - ); + UpdateSnapshotStatusRequestV6 requestV6 = new UpdateSnapshotStatusRequestV6(snapshot, shardId, status); + transportService.sendRequest(masterNode, UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, requestV6, INSTANCE_SAME); } } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); @@ -589,11 +588,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } - private class SnapshotStateExecutor implements ClusterStateTaskExecutor { + class SnapshotStateExecutor implements ClusterStateTaskExecutor { @Override public ClusterTasksResult - execute(ClusterState currentState, List tasks) { + execute(ClusterState currentState, List tasks) throws Exception { 
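The SnapshotStateExecutor restored here is a ClusterStateTaskExecutor: however many shard-status requests have queued up, they are folded into a single new cluster state rather than one publication per shard. A toy, self-contained model of that fold (illustrative names only, no Elasticsearch APIs):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Toy model: N queued shard-status updates produce one new immutable state.
    final class BatchedStatusFold {
        static Map<String, String> execute(Map<String, String> currentState,
                                           List<Map.Entry<String, String>> tasks) {
            Map<String, String> updated = new HashMap<>(currentState);
            for (Map.Entry<String, String> task : tasks) {
                updated.put(task.getKey(), task.getValue()); // shardId -> new status
            }
            return Collections.unmodifiableMap(updated); // one publication per batch
        }
    }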
final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots != null) { int changedCount = 0; @@ -623,6 +622,8 @@ private class SnapshotStateExecutor implements ClusterStateTaskExecutor 0) { logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - return ClusterTasksResult.builder().successes(tasks) - .build(ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, - new SnapshotsInProgress(unmodifiableList(entries))).build()); + + final SnapshotsInProgress updatedSnapshots = + new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); + return ClusterTasksResult.builder().successes(tasks).build( + ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build()); } } return ClusterTasksResult.builder().successes(tasks).build(currentState); @@ -643,14 +646,13 @@ static class UpdateIndexShardSnapshotStatusResponse extends ActionResponse { } - private class UpdateSnapshotStatusAction - extends TransportMasterNodeAction { - UpdateSnapshotStatusAction(TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super( - settings, SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, - actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new - ); + class UpdateSnapshotStatusAction extends + TransportMasterNodeAction { + UpdateSnapshotStatusAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, actionName, transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new); } @Override @@ -665,7 +667,7 @@ protected UpdateIndexShardSnapshotStatusResponse newResponse() { @Override protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, - ActionListener listener) { + ActionListener listener) throws Exception { innerUpdateSnapshotState(request, listener); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 998ab2a38639b..c7bf91b476c5b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -83,9 +83,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; -import java.util.stream.StreamSupport; -import static java.util.Collections.unmodifiableList; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; @@ -100,9 +98,9 @@ * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, ActionListener)} method kicks in and initializes * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state

 *
 * Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
- * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots} method
+ * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method
 *
 * Once shard snapshot is created data node updates state of the shard in the cluster state using
- * the {@link SnapshotShardsService#sendSnapshotShardUpdate} method
+ * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus, DiscoveryNode)} method
 *
 * When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
 * as completed
 *
  • After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository, @@ -123,12 +121,6 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final Map>> snapshotCompletionListeners = new ConcurrentHashMap<>(); - // Set of snapshots that are currently being initialized by this node - private final Set initializingSnapshots = Collections.synchronizedSet(new HashSet<>()); - - // Set of snapshots that are currently being ended by this node - private final Set endingSnapshots = Collections.synchronizedSet(new HashSet<>()); - @Inject public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { @@ -215,7 +207,7 @@ public List snapshots(final String repositoryName, } final ArrayList snapshotList = new ArrayList<>(snapshotSet); CollectionUtil.timSort(snapshotList); - return unmodifiableList(snapshotList); + return Collections.unmodifiableList(snapshotList); } /** @@ -231,7 +223,7 @@ public List currentSnapshots(final String repositoryName) { snapshotList.add(inProgressSnapshot(entry)); } CollectionUtil.timSort(snapshotList); - return unmodifiableList(snapshotList); + return Collections.unmodifiableList(snapshotList); } /** @@ -277,7 +269,7 @@ public ClusterState execute(ClusterState currentState) { if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot here to be processed in clusterStateProcessed List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, - request.indicesOptions(), request.indices())); + request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); List snapshotIndices = repositoryData.resolveNewIndices(indices); newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId), @@ -288,7 +280,6 @@ public ClusterState execute(ClusterState currentState) { System.currentTimeMillis(), repositoryData.getGenId(), null); - initializingSnapshots.add(newSnapshot.snapshot()); snapshots = new SnapshotsInProgress(newSnapshot); } else { throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); @@ -299,9 +290,6 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); - if (newSnapshot != null) { - initializingSnapshots.remove(newSnapshot.snapshot()); - } newSnapshot = null; listener.onFailure(e); } @@ -309,21 +297,7 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { if (newSnapshot != null) { - final Snapshot current = newSnapshot.snapshot(); - assert initializingSnapshots.contains(current); - beginSnapshot(newState, newSnapshot, request.partial(), new ActionListener() { - @Override - public void onResponse(final Snapshot snapshot) { - initializingSnapshots.remove(snapshot); - listener.onResponse(snapshot); - } - - @Override - public void onFailure(final Exception e) { - initializingSnapshots.remove(current); - listener.onFailure(e); - } - }); + beginSnapshot(newState, newSnapshot, request.partial(), listener); } } @@ -331,6 +305,7 @@ public void onFailure(final 
Exception e) { public TimeValue timeout() { return request.masterNodeTimeout(); } + }); } @@ -393,11 +368,8 @@ private void beginSnapshot(final ClusterState clusterState, boolean snapshotCreated; - boolean hadAbortedInitializations; - @Override protected void doRun() { - assert initializingSnapshots.contains(snapshot.snapshot()); Repository repository = repositoriesService.repository(snapshot.snapshot().getRepository()); MetaData metaData = clusterState.metaData(); @@ -422,6 +394,9 @@ protected void doRun() { } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { + SnapshotsInProgress.Entry endSnapshot; + String failure; + @Override public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); @@ -432,13 +407,9 @@ public ClusterState execute(ClusterState currentState) { continue; } - if (entry.state() == State.ABORTED) { - entries.add(entry); - assert entry.shards().isEmpty(); - hadAbortedInitializations = true; - } else { - // Replace the snapshot that was just initialized - ImmutableOpenMap shards = + if (entry.state() != State.ABORTED) { + // Replace the snapshot that was just intialized + ImmutableOpenMap shards = shards(currentState, entry.indices()); if (!partial) { Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, @@ -446,6 +417,9 @@ public ClusterState execute(ClusterState currentState) { Set missing = indicesWithMissingShards.v1(); Set closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { + endSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); + entries.add(endSnapshot); + final StringBuilder failureMessage = new StringBuilder(); if (missing.isEmpty() == false) { failureMessage.append("Indices don't have primary shards "); @@ -458,15 +432,24 @@ public ClusterState execute(ClusterState currentState) { failureMessage.append("Indices are closed "); failureMessage.append(closed); } - entries.add(new SnapshotsInProgress.Entry(entry, State.FAILED, shards, failureMessage.toString())); + failure = failureMessage.toString(); continue; } } - entries.add(new SnapshotsInProgress.Entry(entry, State.STARTED, shards)); + SnapshotsInProgress.Entry updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); + entries.add(updatedSnapshot); + if (completed(shards.values())) { + endSnapshot = updatedSnapshot; + } + } else { + assert entry.state() == State.ABORTED : "expecting snapshot to be aborted during initialization"; + failure = "snapshot was aborted during initialization"; + endSnapshot = entry; + entries.add(endSnapshot); } } return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))) .build(); } @@ -495,12 +478,12 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // should still exist when listener is registered. 
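// A minimal sketch of the cluster-state update idiom used throughout the hunks above;
// the task name and body are illustrative only (not part of this patch) and assume the
// classes already visible in this diff (ClusterStateUpdateTask, SnapshotsInProgress,
// ClusterState) plus java.util.ArrayList, java.util.Collections and java.util.List.
clusterService.submitStateUpdateTask("illustrative update", new ClusterStateUpdateTask() {
    @Override
    public ClusterState execute(ClusterState currentState) {
        SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
        if (snapshots == null) {
            return currentState; // returning the same instance publishes no change
        }
        List<SnapshotsInProgress.Entry> entries = new ArrayList<>();
        for (SnapshotsInProgress.Entry entry : snapshots.entries()) {
            entries.add(entry); // a real task substitutes updated entries here
        }
        return ClusterState.builder(currentState)
            .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries)))
            .build();
    }

    @Override
    public void onFailure(String source, Exception e) {
        logger.warn("illustrative cluster-state update failed for [" + source + "]", e);
    }
});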
userCreateSnapshotListener.onResponse(snapshot.snapshot()); - if (hadAbortedInitializations) { - final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE); - assert snapshotsInProgress != null; - final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); - assert entry != null; - endSnapshot(entry); + // Now that snapshot completion listener is registered we can end the snapshot if needed + // We should end snapshot only if 1) we didn't accept it for processing (which happens when there + // is nothing to do) and 2) there was a snapshot in metadata that we should end. Otherwise we should + // go ahead and continue working on this snapshot rather then end here. + if (endSnapshot != null) { + endSnapshot(endSnapshot, failure); } } }); @@ -542,7 +525,7 @@ public void onFailure(Exception e) { cleanupAfterError(e); } - public void onNoLongerMaster() { + public void onNoLongerMaster(String source) { userCreateSnapshotListener.onFailure(e); } @@ -569,7 +552,7 @@ private void cleanupAfterError(Exception exception) { } - private static SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { + private SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { return new SnapshotInfo(entry.snapshot().getSnapshotId(), entry.indices().stream().map(IndexId::getName).collect(Collectors.toList()), entry.startTime(), entry.includeGlobalState()); @@ -627,7 +610,7 @@ public List currentSnapshots(final String repository, builder.add(entry); } } - return unmodifiableList(builder); + return Collections.unmodifiableList(builder); } /** @@ -683,7 +666,7 @@ public Map snapshotShards(final String reposi return unmodifiableMap(shardStatus); } - private static SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { + private SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { return shardFailure; @@ -697,28 +680,14 @@ public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { // We don't remove old master when master flips anymore. So, we need to check for change in master - final SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; - if (snapshotsInProgress != null) { - if (newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes())) { - processSnapshotsOnRemovedNodes(); - } - if (event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event)) { - processStartedShards(); - } - // Cleanup all snapshots that have no more work left: - // 1. Completed snapshots - // 2. Snapshots in state INIT that the previous master failed to start - // 3. 
Snapshots in any other state that have all their shard tasks completed - snapshotsInProgress.entries().stream().filter( - entry -> entry.state().completed() - || initializingSnapshots.contains(entry.snapshot()) == false - && (entry.state() == State.INIT || completed(entry.shards().values())) - ).forEach(this::endSnapshot); + if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) { + processSnapshotsOnRemovedNodes(event); } - if (newMaster) { - finalizeSnapshotDeletionFromPreviousMaster(event); + if (event.routingTableChanged()) { + processStartedShards(event); } + removeFinishedSnapshotFromClusterState(event); + finalizeSnapshotDeletionFromPreviousMaster(event); } } catch (Exception e) { logger.warn("Failed to update snapshot state ", e); @@ -737,134 +706,166 @@ public void applyClusterState(ClusterChangedEvent event) { * snapshot was deleted and a call to GET snapshots would reveal that the snapshot no longer exists. */ private void finalizeSnapshotDeletionFromPreviousMaster(ClusterChangedEvent event) { - SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; - SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); - deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); + if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) { + SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; + SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); + deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); + } } } /** - * Cleans up shard snapshots that were running on removed nodes + * Removes a finished snapshot from the cluster state. This can happen if the previous + * master node processed a cluster state update that marked the snapshot as finished, + * but the previous master node died before removing the snapshot in progress from the + * cluster state. It is then the responsibility of the new master node to end the + * snapshot and remove it from the cluster state. 
*/ - private void processSnapshotsOnRemovedNodes() { - clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes nodes = currentState.nodes(); - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots == null) { - return currentState; - } - boolean changed = false; - ArrayList entries = new ArrayList<>(); - for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { - SnapshotsInProgress.Entry updatedSnapshot = snapshot; - if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { - ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - boolean snapshotChanged = false; - for (ObjectObjectCursor shardEntry : snapshot.shards()) { - ShardSnapshotStatus shardStatus = shardEntry.value; - if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { - if (nodes.nodeExists(shardStatus.nodeId())) { - shards.put(shardEntry.key, shardEntry.value); - } else { - // TODO: Restart snapshot on another node? - snapshotChanged = true; - logger.warn("failing snapshot of shard [{}] on closed node [{}]", - shardEntry.key, shardStatus.nodeId()); - shards.put(shardEntry.key, - new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); - } - } - } - if (snapshotChanged) { - changed = true; - ImmutableOpenMap shardsMap = shards.build(); - if (!snapshot.state().completed() && completed(shardsMap.values())) { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shardsMap); - } else { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, snapshot.state(), shardsMap); - } - } - entries.add(updatedSnapshot); - } else if (snapshot.state() == State.INIT && initializingSnapshots.contains(snapshot.snapshot()) == false) { - changed = true; - // Mark the snapshot as aborted as it failed to start from the previous master - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); - entries.add(updatedSnapshot); - - // Clean up the snapshot that failed to start from the old master - deleteSnapshot(snapshot.snapshot(), new ActionListener() { - @Override - public void onResponse(Void aVoid) { - logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); - } - - @Override - public void onFailure(Exception e) { - logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); - } - }, updatedSnapshot.getRepositoryStateId(), false); + private void removeFinishedSnapshotFromClusterState(ClusterChangedEvent event) { + if (event.localNodeMaster() && !event.previousState().nodes().isLocalNodeElectedMaster()) { + SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); + if (snapshotsInProgress != null && !snapshotsInProgress.entries().isEmpty()) { + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + if (entry.state().completed()) { + endSnapshot(entry); } } - if (changed) { - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); - } - return currentState; - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn("failed to update snapshot state after node removal"); } - }); + } } - private void processStartedShards() { - clusterService.submitStateUpdateTask("update snapshot state after shards started", new 
ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - RoutingTable routingTable = currentState.routingTable(); - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots != null) { + /** + * Cleans up shard snapshots that were running on removed nodes + * + * @param event cluster changed event + */ + private void processSnapshotsOnRemovedNodes(ClusterChangedEvent event) { + if (removedNodesCleanupNeeded(event)) { + // Check if we just became the master + final boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); + clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + DiscoveryNodes nodes = currentState.nodes(); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots == null) { + return currentState; + } boolean changed = false; ArrayList entries = new ArrayList<>(); for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { SnapshotsInProgress.Entry updatedSnapshot = snapshot; - if (snapshot.state() == State.STARTED) { - ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), - routingTable); - if (shards != null) { + boolean snapshotChanged = false; + if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { + ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); + for (ObjectObjectCursor shardEntry : snapshot.shards()) { + ShardSnapshotStatus shardStatus = shardEntry.value; + if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { + if (nodes.nodeExists(shardStatus.nodeId())) { + shards.put(shardEntry.key, shardEntry.value); + } else { + // TODO: Restart snapshot on another node? 
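// A self-contained sketch of the master fail-over check that the surrounding hunks rely
// on; the method name is invented for illustration, while the accessors (localNodeMaster(),
// previousState(), isLocalNodeElectedMaster()) are the ones used in this patch.
void illustrativeApplyClusterState(ClusterChangedEvent event) {
    if (event.localNodeMaster()) {
        // True exactly when the previous state was published by a different master,
        // i.e. this node was just elected and may have inherited orphaned work such as
        // shard snapshots whose nodes left, or snapshots stuck in INIT.
        final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false;
        if (newMaster) {
            // take over the abandoned work here
        }
    }
}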
+ snapshotChanged = true; + logger.warn("failing snapshot of shard [{}] on closed node [{}]", + shardEntry.key, shardStatus.nodeId()); + shards.put(shardEntry.key, new ShardSnapshotStatus(shardStatus.nodeId(), + State.FAILED, "node shutdown")); + } + } + } + if (snapshotChanged) { changed = true; - if (!snapshot.state().completed() && completed(shards.values())) { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shards); + ImmutableOpenMap shardsMap = shards.build(); + if (!snapshot.state().completed() && completed(shardsMap.values())) { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shardsMap); + endSnapshot(updatedSnapshot); } else { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, shards); + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, snapshot.state(), shardsMap); } } entries.add(updatedSnapshot); + } else if (snapshot.state() == State.INIT && newMaster) { + changed = true; + // Mark the snapshot as aborted as it failed to start from the previous master + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); + entries.add(updatedSnapshot); + + // Clean up the snapshot that failed to start from the old master + deleteSnapshot(snapshot.snapshot(), new ActionListener() { + @Override + public void onResponse(Void aVoid) { + logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); + } + }, updatedSnapshot.getRepositoryStateId(), false); } } if (changed) { - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); + snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); + return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); } + return currentState; } - return currentState; - } - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> - new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); - } - }); + @Override + public void onFailure(String source, Exception e) { + logger.warn("failed to update snapshot state after node removal"); + } + }); + } } - private static ImmutableOpenMap processWaitingShards( + private void processStartedShards(ClusterChangedEvent event) { + if (waitingShardsStartedOrUnassigned(event)) { + clusterService.submitStateUpdateTask("update snapshot state after shards started", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + RoutingTable routingTable = currentState.routingTable(); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots != null) { + boolean changed = false; + ArrayList entries = new ArrayList<>(); + for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { + SnapshotsInProgress.Entry updatedSnapshot = snapshot; + if (snapshot.state() == State.STARTED) { + ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), + routingTable); + if (shards != null) { + changed = true; + if (!snapshot.state().completed() && completed(shards.values())) { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shards); + endSnapshot(updatedSnapshot); + } else { + updatedSnapshot = 
new SnapshotsInProgress.Entry(snapshot, shards); + } + } + entries.add(updatedSnapshot); + } + } + if (changed) { + snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); + return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); + } + } + return currentState; + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> + new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + } + }); + } + } + + private ImmutableOpenMap processWaitingShards( ImmutableOpenMap snapshotShards, RoutingTable routingTable) { boolean snapshotChanged = false; ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); @@ -904,16 +905,19 @@ private static ImmutableOpenMap processWaitingShar } } - private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snapshotsInProgress, ClusterChangedEvent event) { - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - if (entry.state() == State.STARTED) { - for (ObjectCursor index : entry.waitingIndices().keys()) { - if (event.indexRoutingTableChanged(index.value)) { - IndexRoutingTable indexShardRoutingTable = event.state().getRoutingTable().index(index.value); - for (ShardId shardId : entry.waitingIndices().get(index.value)) { - ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.id()).primaryShard(); - if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { - return true; + private boolean waitingShardsStartedOrUnassigned(ClusterChangedEvent event) { + SnapshotsInProgress curr = event.state().custom(SnapshotsInProgress.TYPE); + if (curr != null) { + for (SnapshotsInProgress.Entry entry : curr.entries()) { + if (entry.state() == State.STARTED && !entry.waitingIndices().isEmpty()) { + for (ObjectCursor index : entry.waitingIndices().keys()) { + if (event.indexRoutingTableChanged(index.value)) { + IndexRoutingTable indexShardRoutingTable = event.state().getRoutingTable().index(index.value); + for (ShardId shardId : entry.waitingIndices().get(index.value)) { + ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.id()).primaryShard(); + if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { + return true; + } } } } @@ -923,12 +927,28 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap return false; } - private static boolean removedNodesCleanupNeeded(SnapshotsInProgress snapshotsInProgress, List removedNodes) { - // If at least one shard was running on a removed node - we need to fail it - return removedNodes.isEmpty() == false && snapshotsInProgress.entries().stream().flatMap(snapshot -> - StreamSupport.stream(((Iterable) () -> snapshot.shards().valuesIt()).spliterator(), false) - .filter(s -> s.state().completed() == false).map(ShardSnapshotStatus::nodeId)) - .anyMatch(removedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())::contains); + private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) { + SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); + if (snapshotsInProgress == null) { + return false; + } + // Check if we just became the master + boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); + for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { + if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) 
{ + // We just replaced old master and snapshots in intermediate states needs to be cleaned + return true; + } + for (DiscoveryNode node : event.nodesDelta().removedNodes()) { + for (ObjectCursor shardStatus : snapshot.shards().values()) { + if (!shardStatus.value.state().completed() && node.getId().equals(shardStatus.value.nodeId())) { + // At least one shard was running on the removed node - we need to fail it + return true; + } + } + } + } + return false; } /** @@ -961,16 +981,25 @@ private Tuple, Set> indicesWithMissingShards( * * @param entry snapshot */ - private void endSnapshot(final SnapshotsInProgress.Entry entry) { - if (endingSnapshots.add(entry.snapshot()) == false) { - return; - } + void endSnapshot(final SnapshotsInProgress.Entry entry) { + endSnapshot(entry, null); + } + + + /** + * Finalizes the shard in repository and then removes it from cluster state + *

    + * This is non-blocking method that runs on a thread from SNAPSHOT thread pool + * + * @param entry snapshot + * @param failure failure reason or null if snapshot was successful + */ + private void endSnapshot(final SnapshotsInProgress.Entry entry, final String failure) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { @Override protected void doRun() { final Snapshot snapshot = entry.snapshot(); final Repository repository = repositoriesService.repository(snapshot.getRepository()); - final String failure = entry.failure(); logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), failure); ArrayList shardFailures = new ArrayList<>(); for (ObjectObjectCursor shardStatus : entry.shards()) { @@ -986,7 +1015,7 @@ protected void doRun() { entry.startTime(), failure, entry.shards().size(), - unmodifiableList(shardFailures), + Collections.unmodifiableList(shardFailures), entry.getRepositoryStateId(), entry.includeGlobalState()); removeSnapshotFromClusterState(snapshot, snapshotInfo, null); @@ -1004,7 +1033,7 @@ public void onFailure(final Exception e) { /** * Removes record of running snapshot from cluster state - * @param snapshot snapshot + * @param snapshot snapshot * @param snapshotInfo snapshot info if snapshot was successful * @param e exception if snapshot failed */ @@ -1014,11 +1043,11 @@ private void removeSnapshotFromClusterState(final Snapshot snapshot, final Snaps /** * Removes record of running snapshot from cluster state and notifies the listener when this action is complete - * @param snapshot snapshot + * @param snapshot snapshot * @param failure exception if snapshot failed * @param listener listener to notify when snapshot information is removed from the cluster state */ - private void removeSnapshotFromClusterState(final Snapshot snapshot, @Nullable SnapshotInfo snapshotInfo, final Exception failure, + private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure, @Nullable CleanupAfterErrorListener listener) { clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() { @@ -1036,8 +1065,8 @@ public ClusterState execute(ClusterState currentState) { } } if (changed) { - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); + snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); + return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); } } return currentState; @@ -1046,7 +1075,6 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); - endingSnapshots.remove(snapshot); if (listener != null) { listener.onFailure(e); } @@ -1054,9 +1082,8 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - endingSnapshots.remove(snapshot); if (listener != null) { - listener.onNoLongerMaster(); + listener.onNoLongerMaster(source); } } @@ -1074,7 +1101,6 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS logger.warn("Failed to notify listeners", e); } } - endingSnapshots.remove(snapshot); if (listener != null) { listener.onResponse(snapshotInfo); } @@ -1105,20 +1131,14 @@ public void 
deleteSnapshot(final String repositoryName, final String snapshotNam .filter(s -> s.getName().equals(snapshotName)) .findFirst(); // if nothing found by the same name, then look in the cluster state for current in progress snapshots - long repoGenId = repositoryData.getGenId(); if (matchedEntry.isPresent() == false) { - Optional matchedInProgress = currentSnapshots(repositoryName, Collections.emptyList()).stream() - .filter(s -> s.snapshot().getSnapshotId().getName().equals(snapshotName)).findFirst(); - if (matchedInProgress.isPresent()) { - matchedEntry = matchedInProgress.map(s -> s.snapshot().getSnapshotId()); - // Derive repository generation if a snapshot is in progress because it will increment the generation when it finishes - repoGenId = matchedInProgress.get().getRepositoryStateId() + 1L; - } + matchedEntry = currentSnapshots(repositoryName, Collections.emptyList()).stream() + .map(e -> e.snapshot().getSnapshotId()).filter(s -> s.getName().equals(snapshotName)).findFirst(); } if (matchedEntry.isPresent() == false) { throw new SnapshotMissingException(repositoryName, snapshotName); } - deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repoGenId, immediatePriority); + deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repositoryData.getGenId(), immediatePriority); } /** @@ -1181,12 +1201,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { final ImmutableOpenMap shards; final State state = snapshotEntry.state(); - final String failure; if (state == State.INIT) { // snapshot is still initializing, mark it as aborted shards = snapshotEntry.shards(); - assert shards.isEmpty(); - failure = "Snapshot was aborted during initialization"; + } else if (state == State.STARTED) { // snapshot is started - mark every non completed shard as aborted final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); @@ -1198,7 +1216,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { shardsBuilder.put(shardEntry.key, status); } shards = shardsBuilder.build(); - failure = "Snapshot was aborted by deletion"; + } else { boolean hasUncompletedShards = false; // Cleanup in case a node gone missing and snapshot wasn't updated for some reason @@ -1219,10 +1237,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { // where we force to finish the snapshot logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); shards = snapshotEntry.shards(); + endSnapshot(snapshotEntry); } - failure = snapshotEntry.failure(); } - SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards, failure); + SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards); clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(newSnapshot)); } return clusterStateBuilder.build(); @@ -1373,8 +1391,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS * @param indices list of indices to be snapshotted * @return list of shard to be included into current snapshot */ - private static ImmutableOpenMap shards(ClusterState clusterState, - List indices) { + private ImmutableOpenMap shards(ClusterState clusterState, List indices) { ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); MetaData metaData = clusterState.metaData(); for (IndexId index : indices) { @@ -1399,6 +1416,8 @@ private static 
ImmutableOpenMap

Date: Fri, 1 Mar 2019 00:10:30 +0100
Subject: [PATCH 09/39] SQL: Enhance checks for inexact fields (#39427)

For functions: move checks for `text` fields without underlying `keyword`
fields or with many of them (ambiguity) to the type resolution stage.

For Order By/Group By: move checks to the `Verifier` to catch early before
`QueryTranslator` or execution.

Closes: #38501
Fixes: #35203
---
 .../sql/qa/src/main/resources/docs.csv-spec   |  22 +--
 .../xpack/sql/analysis/analyzer/Verifier.java |  19 ++-
 .../sql/execution/search/SourceGenerator.java |   8 +-
 .../xpack/sql/expression/Expressions.java     |  57 --------
 .../xpack/sql/expression/FieldAttribute.java  |   7 +-
 .../xpack/sql/expression/Order.java           |   8 +-
 .../xpack/sql/expression/TypeResolutions.java | 129 ++++++++++++++++++
 .../function/aggregate/AggregateFunction.java |   9 +-
 .../expression/function/aggregate/Max.java    |   5 +-
 .../expression/function/aggregate/Min.java    |   5 +-
 .../function/aggregate/NumericAggregate.java  |   5 +-
 .../function/aggregate/Percentile.java        |  14 +-
 .../function/aggregate/PercentileRank.java    |  14 +-
 .../function/aggregate/TopHits.java           |  39 +++---
 .../function/grouping/Histogram.java          |  11 +-
 .../scalar/datetime/BaseDateTimeFunction.java |   5 +-
 .../scalar/math/BinaryNumericFunction.java    |   6 +-
 .../function/scalar/math/MathFunction.java    |   4 +-
 .../scalar/string/BinaryStringFunction.java   |   6 +-
 .../string/BinaryStringNumericFunction.java   |   4 +-
 .../string/BinaryStringStringFunction.java    |   4 +-
 .../function/scalar/string/Concat.java        |  11 +-
 .../function/scalar/string/Insert.java        |  14 +-
 .../function/scalar/string/Locate.java        |  16 +--
 .../function/scalar/string/Replace.java       |  11 +-
 .../function/scalar/string/Substring.java     |  12 +-
 .../scalar/string/UnaryStringFunction.java    |   6 +-
 .../scalar/string/UnaryStringIntFunction.java |   4 +-
 .../predicate/logical/BinaryLogic.java        |   4 +-
 .../sql/expression/predicate/logical/Not.java |   5 +-
 .../arithmetic/ArithmeticOperation.java       |   6 +-
 .../predicate/operator/arithmetic/Neg.java    |   5 +-
 .../operator/comparison/BinaryComparison.java |   5 +-
 .../predicate/operator/comparison/In.java     |  13 +-
 .../predicate/regex/RegexMatch.java           |   8 ++
 .../xpack/sql/planner/QueryTranslator.java    |  18 +--
 .../elasticsearch/xpack/sql/type/EsField.java |  41 ++++--
 .../xpack/sql/type/InvalidMappedField.java    |  10 +-
 .../xpack/sql/type/KeywordEsField.java        |   6 +-
 .../xpack/sql/type/TextEsField.java           |  40 ++++--
 .../xpack/sql/type/UnsupportedEsField.java    |  13 +-
 .../analyzer/FieldAttributeTests.java         |  18 +--
 .../analyzer/VerifierErrorMessagesTests.java  | 125 +++++++++++++----
 .../sql/planner/QueryTranslatorTests.java     |  19 +--
 44 files changed, 509 insertions(+), 282 deletions(-)
 create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java

diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec
index ac2fb87236b95..fba4fd9b47743 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec
+++ b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec
@@ -2338,25 +2338,25 @@ Arumugam
 ////////////
 limitationSubSelect
 // tag::limitationSubSelect
-SELECT * FROM (SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%') WHERE first_name LIKE 'A%';
+SELECT * FROM (SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%') WHERE first_name LIKE 'A%' ORDER BY 1;

 first_name | last_name
 ---------------+---------------
-Anneke |Preusig
-Alejandro |McAlpine
-Anoosh |Peyn
-Arumugam
|Ossenbruggen + Alejandro |McAlpine + Anneke |Preusig + Anoosh |Peyn + Arumugam |Ossenbruggen // end::limitationSubSelect ; -limitationSubSelect +limitationSubSelectRewritten // tag::limitationSubSelectRewritten -SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%' AND first_name LIKE 'A%'; +SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%' AND first_name LIKE 'A%' ORDER BY 1; // end::limitationSubSelectRewritten first_name | last_name ---------------+--------------- -Anneke |Preusig -Alejandro |McAlpine -Anoosh |Peyn -Arumugam |Ossenbruggen + Alejandro |McAlpine + Anneke |Preusig + Anoosh |Peyn + Arumugam |Ossenbruggen ; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index 52d53538bb2f8..47c53e772d5dd 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.tree.Node; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.util.StringUtils; import java.util.ArrayList; @@ -294,7 +295,8 @@ Collection verify(LogicalPlan plan) { */ private static boolean checkGroupBy(LogicalPlan p, Set localFailures, Map resolvedFunctions, Set groupingFailures) { - return checkGroupByAgg(p, localFailures, resolvedFunctions) + return checkGroupByInexactField(p, localFailures) + && checkGroupByAgg(p, localFailures, resolvedFunctions) && checkGroupByOrder(p, localFailures, groupingFailures) && checkGroupByHaving(p, localFailures, groupingFailures, resolvedFunctions); } @@ -463,6 +465,21 @@ private static boolean checkGroupByHavingHasOnlyAggs(Expression e, Set localFailures) { + if (p instanceof Aggregate) { + Aggregate a = (Aggregate) p; + + // The grouping can not be an aggregate function or an inexact field (e.g. 
text without a keyword) + a.groupings().forEach(e -> e.forEachUp(c -> { + EsField.Exact exact = c.getExactInfo(); + if (exact.hasExact() == false) { + localFailures.add(fail(c, "Field of data type [" + c.dataType().typeName + "] cannot be used for grouping; " + + exact.errorMsg())); + } + }, FieldAttribute.class)); + } + return true; + } // check whether plain columns specified in an agg are mentioned in the group-by private static boolean checkGroupByAgg(LogicalPlan p, Set localFailures, Map functions) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java index c22b1213d09dc..47ffcf697328c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -33,6 +33,8 @@ public abstract class SourceGenerator { + private SourceGenerator() {} + private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) { @@ -107,8 +109,7 @@ private static void sorting(QueryContainer container, SearchSourceBuilder source // sorting only works on not-analyzed fields - look for a multi-field replacement if (attr instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) attr; - fa = fa.isInexact() ? fa.exactAttribute() : fa; + FieldAttribute fa = ((FieldAttribute) attr).exactAttribute(); sortBuilder = fieldSort(fa.name()) .missing(as.missing().position()) @@ -125,7 +126,8 @@ private static void sorting(QueryContainer container, SearchSourceBuilder source if (nestedSort == null) { fieldSort.setNestedSort(newSort); } else { - for (; nestedSort.getNestedSort() != null; nestedSort = nestedSort.getNestedSort()) { + while (nestedSort.getNestedSort() != null) { + nestedSort = nestedSort.getNestedSort(); } nestedSort.setNestedSort(newSort); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index 648aff5254561..ca5e4b757567c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -6,22 +6,16 @@ package org.elasticsearch.xpack.sql.expression; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.DataTypes; import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Locale; -import java.util.StringJoiner; import java.util.function.Predicate; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; -import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; public final class Expressions { @@ -154,55 +148,4 @@ public static List pipe(List expressions) { } return pipes; } - - public static TypeResolution typeMustBeBoolean(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt 
-> dt == BOOLEAN, operationName, paramOrd, "boolean"); - } - - public static TypeResolution typeMustBeInteger(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isInteger, operationName, paramOrd, "integer"); - } - - public static TypeResolution typeMustBeNumeric(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isNumeric, operationName, paramOrd, "numeric"); - } - - public static TypeResolution typeMustBeString(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isString, operationName, paramOrd, "string"); - } - - public static TypeResolution typeMustBeDate(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isDateBased, operationName, paramOrd, "date", "datetime"); - } - - public static TypeResolution typeMustBeNumericOrDate(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt -> dt.isNumeric() || dt.isDateBased(), operationName, paramOrd, "date", "datetime", "numeric"); - } - - public static TypeResolution typeMustBe(Expression e, - Predicate predicate, - String operationName, - ParamOrdinal paramOrd, - String... acceptedTypes) { - return predicate.test(e.dataType()) || DataTypes.isNull(e.dataType())? - TypeResolution.TYPE_RESOLVED : - new TypeResolution(format(null, "[{}]{} argument must be [{}], found value [{}] type [{}]", - operationName, - paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : " " + paramOrd.name().toLowerCase(Locale.ROOT), - acceptedTypesForErrorMsg(acceptedTypes), - Expressions.name(e), - e.dataType().typeName)); - } - - private static String acceptedTypesForErrorMsg(String... acceptedTypes) { - StringJoiner sj = new StringJoiner(", "); - for (int i = 0; i < acceptedTypes.length - 1; i++) { - sj.add(acceptedTypes[i]); - } - if (acceptedTypes.length > 1) { - return sj.toString() + " or " + acceptedTypes[acceptedTypes.length - 1]; - } else { - return acceptedTypes[0]; - } - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java index 942a597c850c9..3dbd1dcbeb2cf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java @@ -81,12 +81,13 @@ public FieldAttribute nestedParent() { return nestedParent; } - public boolean isInexact() { - return field.isExact() == false; + public EsField.Exact getExactInfo() { + return field.getExactInfo(); } public FieldAttribute exactAttribute() { - if (field.isExact() == false) { + EsField exactField = field.getExactField(); + if (exactField.equals(field) == false) { return innerField(field.getExactField()); } return this; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java index 6a57c3275d4d1..267a8827d8cd6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java @@ -5,14 +5,15 @@ */ package org.elasticsearch.xpack.sql.expression; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import 
org.elasticsearch.xpack.sql.type.DataType; import java.util.List; import java.util.Objects; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isExact; public class Order extends Expression { @@ -45,6 +46,11 @@ public Nullability nullable() { return Nullability.FALSE; } + @Override + protected TypeResolution resolveType() { + return isExact(child, "ORDER BY cannot be applied to field of data type [{}]: {}"); + } + @Override public DataType dataType() { return child.dataType(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java new file mode 100644 index 0000000000000..61bc8ed44a9a8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.type.EsField; + +import java.util.Locale; +import java.util.StringJoiner; +import java.util.function.Predicate; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; +import static org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; +import static org.elasticsearch.xpack.sql.expression.Expressions.name; +import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; + +public final class TypeResolutions { + + private TypeResolutions() {} + + public static TypeResolution isBoolean(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, dt -> dt == BOOLEAN, operationName, paramOrd, "boolean"); + } + + public static TypeResolution isInteger(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isInteger, operationName, paramOrd, "integer"); + } + + public static TypeResolution isNumeric(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isNumeric, operationName, paramOrd, "numeric"); + } + + public static TypeResolution isString(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isString, operationName, paramOrd, "string"); + } + + public static TypeResolution isDate(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isDateBased, operationName, paramOrd, "date", "datetime"); + } + + public static TypeResolution isNumericOrDate(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, dt -> dt.isNumeric() || dt.isDateBased(), operationName, paramOrd, "date", "datetime", "numeric"); + } + + public static TypeResolution isExact(Expression e, String message) { + if (e instanceof FieldAttribute) { + EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); + if (exact.hasExact() == false) { + return new TypeResolution(format(null, message, e.dataType().typeName, exact.errorMsg())); + } + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isExact(Expression e, String operationName, ParamOrdinal paramOrd) { + if 
(e instanceof FieldAttribute) { + EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); + if (exact.hasExact() == false) { + return new TypeResolution(format(null, "[{}] cannot operate on {}field of data type [{}]: {}", + operationName, + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? + "" : paramOrd.name().toLowerCase(Locale.ROOT) + " argument ", + e.dataType().typeName, exact.errorMsg())); + } + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isStringAndExact(Expression e, String operationName, ParamOrdinal paramOrd) { + TypeResolution resolution = isString(e, operationName, paramOrd); + if (resolution.unresolved()) { + return resolution; + } + + return isExact(e, operationName, paramOrd); + } + + public static TypeResolution isFoldable(Expression e, String operationName, ParamOrdinal paramOrd) { + if (!e.foldable()) { + return new TypeResolution(format(null, "{}argument of [{}] must be a constant, received [{}]", + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + Expressions.name(e))); + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isNotFoldable(Expression e, String operationName, ParamOrdinal paramOrd) { + if (e.foldable()) { + return new TypeResolution(format(null, "{}argument of [{}] must be a table column, found constant [{}]", + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + Expressions.name(e))); + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isType(Expression e, + Predicate predicate, + String operationName, + ParamOrdinal paramOrd, + String... acceptedTypes) { + return predicate.test(e.dataType()) || DataTypes.isNull(e.dataType())? + TypeResolution.TYPE_RESOLVED : + new TypeResolution(format(null, "{}argument of [{}] must be [{}], found value [{}] type [{}]", + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + acceptedTypesForErrorMsg(acceptedTypes), + name(e), + e.dataType().typeName)); + } + + private static String acceptedTypesForErrorMsg(String... 
acceptedTypes) { + StringJoiner sj = new StringJoiner(", "); + for (int i = 0; i < acceptedTypes.length - 1; i++) { + sj.add(acceptedTypes[i]); + } + if (acceptedTypes.length > 1) { + return sj.toString() + " or " + acceptedTypes[acceptedTypes.length - 1]; + } else { + return acceptedTypes[0]; + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java index b432c5063a64b..177f598dc9a46 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java @@ -7,6 +7,8 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.expression.function.Function; import org.elasticsearch.xpack.sql.expression.gen.pipeline.AggNameInput; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -78,8 +80,13 @@ public boolean equals(Object obj) { && Objects.equals(other.parameters(), parameters()); } + @Override + protected TypeResolution resolveType() { + return TypeResolutions.isExact(field, sourceText(), Expressions.ParamOrdinal.DEFAULT); + } + @Override public int hashCode() { return Objects.hash(field(), parameters()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java index 898c98463445e..cd03ea85e4558 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -14,6 +13,8 @@ import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; + /** * Find the maximum value in matching documents. 
*/ @@ -48,7 +49,7 @@ protected TypeResolution resolveType() { if (field().dataType().isString()) { return TypeResolution.TYPE_RESOLVED; } else { - return Expressions.typeMustBeNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java index 8652759fca486..07fa44769b2db 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -14,6 +13,8 @@ import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; + /** * Find the minimum value in matched documents. */ @@ -51,7 +52,7 @@ protected TypeResolution resolveType() { if (field().dataType().isString()) { return TypeResolution.TYPE_RESOLVED; } else { - return Expressions.typeMustBeNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java index bfe0d2ded7e34..21d5c23d23a5a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java @@ -6,13 +6,14 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + abstract class NumericAggregate extends AggregateFunction { NumericAggregate(Source source, Expression field, List parameters) { @@ -25,7 +26,7 @@ abstract class NumericAggregate extends AggregateFunction { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java index 76c7bda320012..a0585f4c02176 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ 
-6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -16,7 +15,8 @@ import java.util.List; import static java.util.Collections.singletonList; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isFoldable; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; public class Percentile extends NumericAggregate implements EnclosedAgg { @@ -42,17 +42,17 @@ public Percentile replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { - if (!percent.foldable()) { - return new TypeResolution(format(null, "Second argument of PERCENTILE must be a constant, received [{}]", - Expressions.name(percent))); + TypeResolution resolution = isFoldable(percent, sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } - TypeResolution resolution = super.resolveType(); + resolution = super.resolveType(); if (resolution.unresolved()) { return resolution; } - return Expressions.typeMustBeNumeric(percent, sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(percent, sourceText(), ParamOrdinal.DEFAULT); } public Expression percent() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java index b30b38a01b6c5..da8c487ff31f9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -16,7 +15,8 @@ import java.util.List; import static java.util.Collections.singletonList; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isFoldable; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; public class PercentileRank extends AggregateFunction implements EnclosedAgg { @@ -42,17 +42,17 @@ public Expression replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { - if (!value.foldable()) { - return new TypeResolution(format(null, "Second argument of PERCENTILE_RANK must be a constant, received [{}]", - Expressions.name(value))); + TypeResolution resolution = isFoldable(value, sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } - TypeResolution resolution = super.resolveType(); + resolution = super.resolveType(); if (resolution.unresolved()) { return resolution; } - return Expressions.typeMustBeNumeric(value, sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(value, sourceText(), 
ParamOrdinal.DEFAULT); } public Expression value() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java index 5ec96ea41c87b..7525d3e232748 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java @@ -5,16 +5,15 @@ */ package org.elasticsearch.xpack.sql.expression.function.aggregate; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; -import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import java.util.Collections; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNotFoldable; /** * Super class of Aggregation functions on field types other than numeric, that need to be @@ -37,29 +36,25 @@ public DataType dataType() { @Override protected TypeResolution resolveType() { - if (field().foldable()) { - return new TypeResolution(format(null, "First argument of [{}] must be a table column, found constant [{}]", - functionName(), - Expressions.name(field()))); + TypeResolution resolution = isNotFoldable(field(), sourceText(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } - try { - ((FieldAttribute) field()).exactAttribute(); - } catch (MappingException ex) { - return new TypeResolution(format(null, "[{}] cannot operate on first argument field of data type [{}]", - functionName(), field().dataType().typeName)); + + resolution = TypeResolutions.isExact(field(), sourceText(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } if (orderField() != null) { - if (orderField().foldable()) { - return new TypeResolution(format(null, "Second argument of [{}] must be a table column, found constant [{}]", - functionName(), - Expressions.name(orderField()))); + resolution = isNotFoldable(orderField(), sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } - try { - ((FieldAttribute) orderField()).exactAttribute(); - } catch (MappingException ex) { - return new TypeResolution(format(null, "[{}] cannot operate on second argument field of data type [{}]", - functionName(), orderField().dataType().typeName)); + + resolution = TypeResolutions.isExact(orderField(), sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } } return TypeResolution.TYPE_RESOLVED; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java index 23061bfea1859..9cb752de5e69a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java @@ -7,7 +7,6 @@ package 
org.elasticsearch.xpack.sql.expression.function.grouping; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -20,6 +19,10 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isType; + public class Histogram extends GroupingFunction { private final Literal interval; @@ -41,13 +44,13 @@ public ZoneId zoneId() { @Override protected TypeResolution resolveType() { - TypeResolution resolution = Expressions.typeMustBeNumericOrDate(field(), "HISTOGRAM", ParamOrdinal.FIRST); + TypeResolution resolution = isNumericOrDate(field(), "HISTOGRAM", ParamOrdinal.FIRST); if (resolution == TypeResolution.TYPE_RESOLVED) { // interval must be Literal interval if (field().dataType().isDateBased()) { - resolution = Expressions.typeMustBe(interval, DataTypes::isInterval, "(Date) HISTOGRAM", ParamOrdinal.SECOND, "interval"); + resolution = isType(interval, DataTypes::isInterval, "(Date) HISTOGRAM", ParamOrdinal.SECOND, "interval"); } else { - resolution = Expressions.typeMustBeNumeric(interval, "(Numeric) HISTOGRAM", ParamOrdinal.SECOND); + resolution = isNumeric(interval, "(Numeric) HISTOGRAM", ParamOrdinal.SECOND); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java index fa949007ef58a..cae78a42e55e9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -17,6 +16,8 @@ import java.time.ZonedDateTime; import java.util.Objects; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isDate; + abstract class BaseDateTimeFunction extends UnaryScalarFunction { private final ZoneId zoneId; @@ -35,7 +36,7 @@ protected final NodeInfo info() { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeDate(field(), sourceText(), ParamOrdinal.DEFAULT); + return isDate(field(), sourceText(), ParamOrdinal.DEFAULT); } public ZoneId zoneId() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java index f3369bf14a457..8230329454992 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java @@ -16,6 +16,8 @@ import java.util.Objects; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + public abstract class BinaryNumericFunction extends BinaryScalarFunction { private final BinaryMathOperation operation; @@ -36,12 +38,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = Expressions.typeMustBeNumeric(left(), sourceText(), ParamOrdinal.FIRST); + TypeResolution resolution = isNumeric(left(), sourceText(), ParamOrdinal.FIRST); if (resolution.unresolved()) { return resolution; } - return Expressions.typeMustBeNumeric(right(), sourceText(), ParamOrdinal.SECOND); + return isNumeric(right(), sourceText(), ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java index e0555ab0ea3bf..4389e1ac814a9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.math; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; @@ -18,6 +17,7 @@ import java.util.Objects; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; public abstract class MathFunction extends UnaryScalarFunction { @@ -56,7 +56,7 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - return Expressions.typeMustBeNumeric(field(), operation().toString(), ParamOrdinal.DEFAULT); + return isNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java index fd294564b642c..611e86507ee5b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java @@ -16,7 +16,7 @@ import java.util.Objects; import java.util.function.BiFunction; -import static org.elasticsearch.xpack.sql.expression.Expressions.typeMustBeString; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; /** @@ -42,7 +42,7 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = typeMustBeString(left(), sourceText(), ParamOrdinal.FIRST); + TypeResolution resolution = isStringAndExact(left(), sourceText(), ParamOrdinal.FIRST); if 
(resolution.unresolved()) { return resolution; } @@ -67,7 +67,7 @@ protected String scriptMethodName() { @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java index d9f767d1ce81a..fac0646c2c611 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java @@ -12,6 +12,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + /** * A binary string function with a numeric second parameter and a string result */ @@ -26,7 +28,7 @@ public BinaryStringNumericFunction(Source source, Expression left, Expression ri @Override protected TypeResolution resolveSecondParameterInputType(Expression e) { - return Expressions.typeMustBeNumeric(e, sourceText(), Expressions.ParamOrdinal.SECOND); + return isNumeric(e, sourceText(), Expressions.ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java index 51189f6a4efff..eaa6ac428c85f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java @@ -10,6 +10,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; + /** * A binary string function with two string parameters and a numeric result */ @@ -21,7 +23,7 @@ public BinaryStringStringFunction(Source source, Expression left, Expression rig @Override protected TypeResolution resolveSecondParameterInputType(Expression e) { - return Expressions.typeMustBeString(e, sourceText(), Expressions.ParamOrdinal.SECOND); + return isStringAndExact(e, sourceText(), Expressions.ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java index 1f9833133a98a..4e461d919a93a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; 
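
The resolveType() rewrites in the hunks around this point all follow one convention: each TypeResolutions predicate returns a TypeResolution, and the caller short-circuits on the first unresolved argument instead of building an ad-hoc error string. A minimal sketch of that chaining style for a hypothetical two-argument function, using only the isStringAndExact/isNumeric signatures visible in these hunks:

    @Override
    protected TypeResolution resolveType() {
        if (!childrenResolved()) {
            return new TypeResolution("Unresolved children");
        }
        // Validate one argument at a time and bail out on the first failure,
        // so the user sees a single, ordinal-specific error message.
        TypeResolution resolution = isStringAndExact(left(), sourceText(), ParamOrdinal.FIRST);
        if (resolution.unresolved()) {
            return resolution;
        }
        return isNumeric(right(), sourceText(), ParamOrdinal.SECOND);
    }
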
import static org.elasticsearch.xpack.sql.expression.function.scalar.string.ConcatFunctionProcessor.process; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -37,12 +38,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(left(), sourceText(), ParamOrdinal.FIRST); - if (sourceResolution.unresolved()) { - return sourceResolution; + TypeResolution resolution = isStringAndExact(left(), functionName(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } - return Expressions.typeMustBeString(right(), sourceText(), ParamOrdinal.SECOND); + return isStringAndExact(right(), functionName(), ParamOrdinal.SECOND); } @Override @@ -78,7 +79,7 @@ protected NodeInfo info() { @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java index 8e6fc2052928e..8cff98d4c7c80 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java @@ -21,6 +21,8 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.InsertFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -46,22 +48,22 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.FIRST); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.FIRST); if (sourceResolution.unresolved()) { return sourceResolution; } - - TypeResolution startResolution = Expressions.typeMustBeNumeric(start, sourceText(), ParamOrdinal.SECOND); + + TypeResolution startResolution = isNumeric(start, sourceText(), ParamOrdinal.SECOND); if (startResolution.unresolved()) { return startResolution; } - TypeResolution lengthResolution = Expressions.typeMustBeNumeric(length, sourceText(), ParamOrdinal.THIRD); + TypeResolution lengthResolution = isNumeric(length, sourceText(), ParamOrdinal.THIRD); if (lengthResolution.unresolved()) { return lengthResolution; } - return Expressions.typeMustBeString(replacement, sourceText(), ParamOrdinal.FOURTH); + return isStringAndExact(replacement, sourceText(), ParamOrdinal.FOURTH); } @Override @@ -119,7 +121,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplate @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java index 042ec1a736373..806e6fab8e465 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java @@ -21,6 +21,8 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.LocateFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -48,19 +50,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution patternResolution = Expressions.typeMustBeString(pattern, sourceText(), ParamOrdinal.FIRST); + TypeResolution patternResolution = isStringAndExact(pattern, sourceText(), ParamOrdinal.FIRST); if (patternResolution.unresolved()) { return patternResolution; } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.SECOND); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.SECOND); if (sourceResolution.unresolved()) { return sourceResolution; } - - return start == null ? - TypeResolution.TYPE_RESOLVED : - Expressions.typeMustBeNumeric(start, sourceText(), ParamOrdinal.THIRD); + + return start == null ? TypeResolution.TYPE_RESOLVED : isNumeric(start, sourceText(), ParamOrdinal.THIRD); } @Override @@ -80,7 +80,7 @@ protected NodeInfo info() { public boolean foldable() { return pattern.foldable() && source.foldable() - && (start == null? true : start.foldable()); + && (start == null || start.foldable()); } @Override @@ -122,7 +122,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate patternScript, ScriptTemplate @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java index cd960f1f3b3ea..a1150fc5d38af 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java @@ -21,6 +21,7 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -44,17 +45,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.FIRST); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.FIRST); if (sourceResolution.unresolved()) { return sourceResolution; } - TypeResolution patternResolution = Expressions.typeMustBeString(pattern, sourceText(), ParamOrdinal.SECOND); + TypeResolution patternResolution = isStringAndExact(pattern, sourceText(), ParamOrdinal.SECOND); if (patternResolution.unresolved()) { return patternResolution; } - return Expressions.typeMustBeString(replacement, sourceText(), ParamOrdinal.THIRD); + return isStringAndExact(replacement, sourceText(), ParamOrdinal.THIRD); } @Override @@ -107,7 +108,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplate @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } @@ -124,4 +125,4 @@ public Expression replaceChildren(List newChildren) { return new Replace(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java index 75178e73fce46..a341a6bb8c203 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java @@ -21,6 +21,8 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isInteger; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -45,17 +47,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.FIRST); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.FIRST); if (sourceResolution.unresolved()) { return sourceResolution; } - TypeResolution startResolution = Expressions.typeMustBeNumeric(start, sourceText(), ParamOrdinal.SECOND); + TypeResolution startResolution = isInteger(start, sourceText(), ParamOrdinal.SECOND); if (startResolution.unresolved()) { return startResolution; } - return Expressions.typeMustBeNumeric(length, sourceText(), ParamOrdinal.THIRD); + return isInteger(length, sourceText(), ParamOrdinal.THIRD); } @Override @@ -107,7 +109,7 @@ protected ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplat @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } @@ -124,4 +126,4 @@ public Expression replaceChildren(List newChildren) { return new Substring(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java index ef3944a9093a4..b2c72f0f5b6d7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; @@ -20,6 +19,7 @@ import java.util.Objects; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; public abstract class UnaryStringFunction extends UnaryScalarFunction { @@ -43,7 +43,7 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - return Expressions.typeMustBeString(field(), operation().toString(), ParamOrdinal.DEFAULT); + return isStringAndExact(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override @@ -57,7 +57,7 @@ protected Processor makeProcessor() { public ScriptTemplate scriptWithField(FieldAttribute field) { //TODO change this to use _source instead of the exact form (aka field.keyword for text fields) return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java index 5603a29d81d7c..7d9a64e59cdc2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; @@ -19,6 +18,7 @@ import java.util.Objects; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isInteger; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; /** @@ -45,7 +45,7 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - return Expressions.typeMustBeInteger(field(), operation().toString(), ParamOrdinal.DEFAULT); + return isInteger(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java index 930636657fc48..dfc5232cddd2d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isBoolean; + public abstract class BinaryLogic extends BinaryOperator { protected BinaryLogic(Source source, Expression left, Expression right, BinaryLogicOperation operation) { @@ -27,7 +29,7 @@ public DataType dataType() { @Override protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { - return Expressions.typeMustBeBoolean(e, sourceText(), paramOrdinal); + return isBoolean(e, sourceText(), paramOrdinal); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java index bbadaa6ef6430..88e20d187f343 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.predicate.logical; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import 
org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -16,6 +15,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isBoolean; + public class Not extends UnaryScalarFunction { public Not(Source source, Expression child) { @@ -37,7 +38,7 @@ protected TypeResolution resolveType() { if (DataType.BOOLEAN == field().dataType()) { return TypeResolution.TYPE_RESOLVED; } - return Expressions.typeMustBeBoolean(field(), sourceText(), ParamOrdinal.DEFAULT); + return isBoolean(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java index d1d28e3683863..157f34dc404a2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + public abstract class ArithmeticOperation extends BinaryOperator { private DataType dataType; @@ -24,7 +26,7 @@ protected ArithmeticOperation(Source source, Expression left, Expression right, @Override protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { - return Expressions.typeMustBeNumeric(e, symbol(), paramOrdinal); + return isNumeric(e, sourceText(), paramOrdinal); } @Override @@ -44,4 +46,4 @@ public DataType dataType() { protected Pipe makePipe() { return new BinaryArithmeticPipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right()), function()); } -} \ No newline at end of file +}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java index 3ada9a523a13c..a8c7f87d6380d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -16,6 +15,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + /** * Negation function ({@code -x}).
*/ @@ -37,7 +38,7 @@ protected Neg replaceChild(Expression newChild) { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java index 4e7473907c890..fdd33af077b40 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; @@ -22,7 +23,7 @@ protected BinaryComparison(Source source, Expression left, Expression right, Bin @Override protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { - return TypeResolution.TYPE_RESOLVED; + return TypeResolutions.isExact(e, sourceText(), paramOrdinal); } @Override @@ -43,4 +44,4 @@ protected Pipe makePipe() { public static Integer compare(Object left, Object right) { return Comparisons.compare(left, right); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java index f8809b0a9519d..04dc3b45b7e38 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java @@ -5,12 +5,11 @@ */ package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; -import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.expression.Nullability; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; @@ -109,13 +108,9 @@ protected Pipe makePipe() { @Override protected TypeResolution resolveType() { - if (value instanceof FieldAttribute) { - try { - ((FieldAttribute) value).exactAttribute(); - } catch (MappingException e) { - return new TypeResolution(format(null, "[{}] cannot operate on field of data type [{}]: {}", - functionName(), value().dataType().esType, e.getMessage())); - } + TypeResolution resolution = TypeResolutions.isExact(value, functionName(), 
Expressions.ParamOrdinal.DEFAULT); + if (resolution != TypeResolution.TYPE_RESOLVED) { + return resolution; } for (Expression ex : list) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java index ed65b1fcaf9cb..b3c09c67fc6c7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.predicate.regex; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Nullability; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -14,6 +15,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; + public abstract class RegexMatch extends UnaryScalarFunction { private final String pattern; @@ -36,6 +39,11 @@ public Nullability nullable() { return field().nullable(); } + @Override + protected TypeResolution resolveType() { + return isStringAndExact(field(), sourceText(), Expressions.ParamOrdinal.DEFAULT); + } + @Override public boolean foldable() { // right() is not directly foldable in any context but Like can fold it. diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 57d1c016c608c..42becd95c6eae 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -257,10 +257,7 @@ static GroupingContext groupBy(List groupings) { // change analyzed to non non-analyzed attributes if (exp instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) exp; - if (fa.isInexact()) { - ne = fa.exactAttribute(); - } + ne = ((FieldAttribute) exp).exactAttribute(); } // handle functions differently @@ -448,7 +445,7 @@ static String field(AggregateFunction af) { // COUNT(DISTINCT) uses cardinality aggregation which works on exact values (not changed by analyzers or normalizers) if (af instanceof Count && ((Count) af).distinct()) { // use the `keyword` version of the field, if there is one - return field.isInexact() ? field.exactAttribute().name() : field.name(); + return field.exactAttribute().name(); } return field.name(); } @@ -481,9 +478,7 @@ protected QueryTranslation asQuery(RegexMatch e, boolean onAggs) { String target = null; if (e.field() instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) e.field(); - inexact = fa.isInexact(); - target = nameOf(inexact ? 
fa.exactAttribute() : fa); + target = nameOf(((FieldAttribute) e.field()).exactAttribute()); } else { throw new SqlIllegalArgumentException("Scalar function ({}) not allowed (yet) as arguments for LIKE", Expressions.name(e.field())); @@ -683,12 +678,9 @@ private static Query translateQuery(BinaryComparison bc) { } if (bc instanceof Equals || bc instanceof NullEquals || bc instanceof NotEquals) { if (bc.left() instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) bc.left(); // equality should always be against an exact match // (which is important for strings) - if (fa.isInexact()) { - name = fa.exactAttribute().name(); - } + name = ((FieldAttribute) bc.left()).exactAttribute().name(); } Query query = new TermQuery(source, name, value); if (bc instanceof NotEquals) { @@ -726,7 +718,7 @@ protected QueryTranslation asQuery(In in, boolean onAggs) { if (in.value() instanceof FieldAttribute) { FieldAttribute fa = (FieldAttribute) in.value(); // equality should always be against an exact match (which is important for strings) - q = new TermsQuery(in.source(), fa.isInexact() ? fa.exactAttribute().name() : fa.name(), in.list()); + q = new TermsQuery(in.source(), fa.exactAttribute().name(), in.list()); } else { q = new ScriptQuery(in.source(), in.asScript()); }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java index 47a2904adb7a7..52f531ba6e41e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java @@ -14,6 +14,7 @@ * SQL-related information about an index field */ public class EsField { + private final DataType esDataType; private final boolean aggregatable; private final Map<String, EsField> properties; @@ -58,7 +59,9 @@ public Map<String, EsField> getProperties() { /** * Returns the path to the keyword version of this field if this field is text and it has a subfield that is - * indexed as keyword, null if such field is not found or the field name itself in all other cases + * indexed as keyword, throws an exception if no such field is found, or returns the field itself in all other cases. + * To avoid the exception, {@link EsField#getExactInfo()} should be used beforehand to check if an exact field exists, + * and if not, to get the errorMessage which explains why it doesn't. */ public EsField getExactField() { return this; } @@ -76,13 +79,14 @@ public int getPrecision() { } /** - * True if this field name can be used in sorting, aggregations and term queries as is - * <p> - * This will be true for most fields except analyzed text fields that cannot be used directly and should be - * replaced with the field returned by {@link EsField#getExactField()} instead. + * Returns an {@link Exact} object with all the necessary info about the field: + * <ul> + *  <li>whether it has an exact underlying field or not</li> + *  <li>and, if not, an error message explaining why it doesn't</li> + * </ul>
*/ - public boolean isExact() { - return true; + public Exact getExactInfo() { + return Exact.EXACT_FIELD; } @Override @@ -108,4 +112,25 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(esDataType, aggregatable, properties, name); } -} \ No newline at end of file + + public static final class Exact { + + private static Exact EXACT_FIELD = new Exact(true, null); + + private boolean hasExact; + private String errorMsg; + + public Exact(boolean hasExact, String errorMsg) { + this.hasExact = hasExact; + this.errorMsg = errorMsg; + } + + public boolean hasExact() { + return hasExact; + } + + public String errorMsg() { + return errorMsg; + } + } +}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java index 59bb94c78c86e..79f8eb1c20c1f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.type; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import java.util.Objects; @@ -46,12 +46,12 @@ public boolean equals(Object obj) { @Override public EsField getExactField() { - throw new MappingException("Field [" + getName() + "] is invalid, cannot access it"); + throw new SqlIllegalArgumentException("Field [" + getName() + "] is invalid, cannot access it"); } @Override - public boolean isExact() { - return false; + public Exact getExactInfo() { + return new Exact(false, "Field [" + getName() + "] is invalid, cannot access it"); } -} \ No newline at end of file +}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java index d40fa7b19af92..3b77608fc8bed 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java @@ -33,8 +33,8 @@ public int getPrecision() { } @Override - public boolean isExact() { - return normalized == false; + public Exact getExactInfo() { + return new Exact(normalized == false, "Normalized keyword field cannot be used for exact match operations"); } @Override @@ -52,4 +52,4 @@ public int hashCode() { return Objects.hash(super.hashCode(), precision, normalized); } -} \ No newline at end of file +}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java index f1c596a301c54..4944a472e2104 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java @@ -5,9 +5,11 @@ */ package org.elasticsearch.xpack.sql.type; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import java.util.Map; +import java.util.function.Function; /** * SQL-related information about an index field with text type */ @@ -20,25 +22,41 @@ public TextEsField(String name, Map<String, EsField> properties, boolean hasDocV @Override public EsField getExactField() { + Tuple<EsField, String>
findExact = findExact(); + if (findExact.v1() == null) { + throw new SqlIllegalArgumentException(findExact.v2()); + } + return findExact.v1(); + } + + @Override + public Exact getExactInfo() { + return PROCESS_EXACT_FIELD.apply(findExact()); + } + + private Tuple<EsField, String> findExact() { EsField field = null; for (EsField property : getProperties().values()) { - if (property.getDataType() == DataType.KEYWORD && property.isExact()) { + if (property.getDataType() == DataType.KEYWORD && property.getExactInfo().hasExact()) { if (field != null) { - throw new MappingException("Multiple exact keyword candidates available for [" + getName() + - "]; specify which one to use"); + return new Tuple<>(null, "Multiple exact keyword candidates available for [" + getName() + + "]; specify which one to use"); } field = property; } } if (field == null) { - throw new MappingException("No keyword/multi-field defined exact matches for [" + getName() + - "]; define one or use MATCH/QUERY instead"); + return new Tuple<>(null, "No keyword/multi-field defined exact matches for [" + getName() + + "]; define one or use MATCH/QUERY instead"); } - return field; + return new Tuple<>(field, null); } - @Override - public boolean isExact() { - return false; - } + private Function<Tuple<EsField, String>, Exact> PROCESS_EXACT_FIELD = tuple -> { + if (tuple.v1() == null) { + return new Exact(false, tuple.v2()); + } else { + return new Exact(true, null); + } + }; }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java index c88d676c223b6..2909c5f199053 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java @@ -26,16 +26,21 @@ public String getOriginalType() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } UnsupportedEsField that = (UnsupportedEsField) o; return Objects.equals(originalType, that.originalType); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), originalType); } }
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java index 607810efc666a..bc7b85b5392e9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -6,10 +6,10 @@ package org.elasticsearch.xpack.sql.analysis.analyzer; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; @@ -113,9 +113,9 @@ public void
testExactKeyword() { assertThat(attr.path(), is("some")); assertThat(attr.name(), is("some.string")); assertThat(attr.dataType(), is(DataType.TEXT)); - assertThat(attr.isInexact(), is(true)); + assertTrue(attr.getExactInfo().hasExact()); FieldAttribute exact = attr.exactAttribute(); - assertThat(exact.isInexact(), is(false)); + assertTrue(exact.getExactInfo().hasExact()); assertThat(exact.name(), is("some.string.typical")); assertThat(exact.dataType(), is(KEYWORD)); } @@ -125,9 +125,11 @@ public void testAmbiguousExactKeyword() { assertThat(attr.path(), is("some")); assertThat(attr.name(), is("some.ambiguous")); assertThat(attr.dataType(), is(DataType.TEXT)); - assertThat(attr.isInexact(), is(true)); - MappingException me = expectThrows(MappingException.class, () -> attr.exactAttribute()); - assertThat(me.getMessage(), + assertFalse(attr.getExactInfo().hasExact()); + assertThat(attr.getExactInfo().errorMsg(), + is("Multiple exact keyword candidates available for [ambiguous]; specify which one to use")); + SqlIllegalArgumentException e = expectThrows(SqlIllegalArgumentException.class, () -> attr.exactAttribute()); + assertThat(e.getMessage(), is("Multiple exact keyword candidates available for [ambiguous]; specify which one to use")); } @@ -136,7 +138,7 @@ public void testNormalizedKeyword() { assertThat(attr.path(), is("some.string")); assertThat(attr.name(), is("some.string.normalized")); assertThat(attr.dataType(), is(KEYWORD)); - assertThat(attr.isInexact(), is(true)); + assertFalse(attr.getExactInfo().hasExact()); } public void testDottedFieldPath() { @@ -197,4 +199,4 @@ public void testFieldAmbiguity() { assertThat(attribute.qualifier(), is("test")); assertThat(attribute.name(), is("test.test")); } -} \ No newline at end of file +}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index eec483ca219b8..dfeb44dfe2165 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -259,7 +259,7 @@ public void testGroupByNegativeOrdinal() { } public void testGroupByOrderByAliasedInSelectAllowed() { - LogicalPlan lp = accept("SELECT text t FROM test GROUP BY text ORDER BY t"); + LogicalPlan lp = accept("SELECT int i FROM test GROUP BY int ORDER BY i"); assertNotNull(lp); } @@ -292,6 +292,12 @@ public void testStarOnNested() { assertNotNull(accept("SELECT dep.* FROM test")); } + public void testGroupByOnInexact() { + assertEquals("1:36: Field of data type [text] cannot be used for grouping; " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT COUNT(*) FROM test GROUP BY text")); + } + public void testGroupByOnNested() { assertEquals("1:38: Grouping isn't (yet) compatible with nested fields [dep.dep_id]", error("SELECT dep.dep_id FROM test GROUP BY dep.dep_id")); @@ -322,6 +328,18 @@ public void testUnsupportedTypeInFilter() { error("SELECT * FROM test WHERE unsupported > 1")); } + public void testTermEqualityOnInexact() { + assertEquals("1:26: [text = 'value'] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test WHERE text =
'value'")); + } + + public void testTermEqualityOnAmbiguous() { + assertEquals("1:26: [some.ambiguous = 'value'] cannot operate on first argument field of data type [text]: " + + "Multiple exact keyword candidates available for [ambiguous]; specify which one to use", + error("SELECT * FROM test WHERE some.ambiguous = 'value'")); + } + public void testUnsupportedTypeInFunction() { assertEquals("1:12: Cannot use field [unsupported] type [ip_range] as is unsupported", error("SELECT ABS(unsupported) FROM test")); @@ -332,6 +350,12 @@ public void testUnsupportedTypeInOrder() { error("SELECT * FROM test ORDER BY unsupported")); } + public void testInexactFieldInOrder() { + assertEquals("1:29: ORDER BY cannot be applied to field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test ORDER BY text")); + } + public void testGroupByOrderByAggregate() { accept("SELECT AVG(int) a FROM test GROUP BY bool ORDER BY a"); } @@ -416,65 +440,106 @@ public void testInOnFieldTextWithNoKeyword() { } public void testNotSupportedAggregateOnDate() { - assertEquals("1:8: [AVG(date)] argument must be [numeric], found value [date] type [datetime]", + assertEquals("1:8: argument of [AVG(date)] must be [numeric], found value [date] type [datetime]", error("SELECT AVG(date) FROM test")); } - public void testInvalidTypeForStringFunction_WithOneArg() { - assertEquals("1:8: [LENGTH] argument must be [string], found value [1] type [integer]", + public void testInvalidTypeForStringFunction_WithOneArgString() { + assertEquals("1:8: argument of [LENGTH(1)] must be [string], found value [1] type [integer]", error("SELECT LENGTH(1)")); } + public void testInvalidTypeForStringFunction_WithOneArgNumeric() { + assertEquals("1:8: argument of [CHAR('foo')] must be [integer], found value ['foo'] type [keyword]", + error("SELECT CHAR('foo')")); + } + + public void testInvalidTypeForNestedStringFunctions_WithOneArg() { + assertEquals("1:14: argument of [CHAR('foo')] must be [integer], found value ['foo'] type [keyword]", + error("SELECT ASCII(CHAR('foo'))")); + } + public void testInvalidTypeForNumericFunction_WithOneArg() { - assertEquals("1:8: [COS] argument must be [numeric], found value ['foo'] type [keyword]", + assertEquals("1:8: argument of [COS('foo')] must be [numeric], found value ['foo'] type [keyword]", error("SELECT COS('foo')")); } public void testInvalidTypeForBooleanFunction_WithOneArg() { - assertEquals("1:8: [NOT 'foo'] argument must be [boolean], found value ['foo'] type [keyword]", + assertEquals("1:8: argument of [NOT 'foo'] must be [boolean], found value ['foo'] type [keyword]", error("SELECT NOT 'foo'")); } public void testInvalidTypeForStringFunction_WithTwoArgs() { - assertEquals("1:8: [CONCAT(1, 'bar')] first argument must be [string], found value [1] type [integer]", + assertEquals("1:8: first argument of [CONCAT] must be [string], found value [1] type [integer]", error("SELECT CONCAT(1, 'bar')")); - assertEquals("1:8: [CONCAT('foo', 2)] second argument must be [string], found value [2] type [integer]", + assertEquals("1:8: second argument of [CONCAT] must be [string], found value [2] type [integer]", error("SELECT CONCAT('foo', 2)")); } public void testInvalidTypeForNumericFunction_WithTwoArgs() { - assertEquals("1:8: [TRUNCATE('foo', 2)] first argument must be [numeric], found value ['foo'] type [keyword]", + assertEquals("1:8: first argument of [TRUNCATE('foo', 2)] must be [numeric], found value ['foo'] type 
[keyword]", error("SELECT TRUNCATE('foo', 2)")); - assertEquals("1:8: [TRUNCATE(1.2, 'bar')] second argument must be [numeric], found value ['bar'] type [keyword]", + assertEquals("1:8: second argument of [TRUNCATE(1.2, 'bar')] must be [numeric], found value ['bar'] type [keyword]", error("SELECT TRUNCATE(1.2, 'bar')")); } public void testInvalidTypeForBooleanFuntion_WithTwoArgs() { - assertEquals("1:8: [1 OR true] first argument must be [boolean], found value [1] type [integer]", + assertEquals("1:8: first argument of [1 OR true] must be [boolean], found value [1] type [integer]", error("SELECT 1 OR true")); - assertEquals("1:8: [true OR 2] second argument must be [boolean], found value [2] type [integer]", + assertEquals("1:8: second argument of [true OR 2] must be [boolean], found value [2] type [integer]", error("SELECT true OR 2")); } - public void testInvalidTypeForFunction_WithThreeArgs() { - assertEquals("1:8: [REPLACE(1, 'foo', 'bar')] first argument must be [string], found value [1] type [integer]", + public void testInvalidTypeForReplace() { + assertEquals("1:8: first argument of [REPLACE(1, 'foo', 'bar')] must be [string], found value [1] type [integer]", error("SELECT REPLACE(1, 'foo', 'bar')")); - assertEquals("1:8: [REPLACE('text', 2, 'bar')] second argument must be [string], found value [2] type [integer]", - error("SELECT REPLACE('text', 2, 'bar')")); - assertEquals("1:8: [REPLACE('text', 'foo', 3)] third argument must be [string], found value [3] type [integer]", - error("SELECT REPLACE('text', 'foo', 3)")); + assertEquals("1:8: [REPLACE(text, 'foo', 'bar')] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT REPLACE(text, 'foo', 'bar') FROM test")); + + assertEquals("1:8: second argument of [REPLACE('foo', 2, 'bar')] must be [string], found value [2] type [integer]", + error("SELECT REPLACE('foo', 2, 'bar')")); + assertEquals("1:8: [REPLACE('foo', text, 'bar')] cannot operate on second argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT REPLACE('foo', text, 'bar') FROM test")); + + assertEquals("1:8: third argument of [REPLACE('foo', 'bar', 3)] must be [string], found value [3] type [integer]", + error("SELECT REPLACE('foo', 'bar', 3)")); + assertEquals("1:8: [REPLACE('foo', 'bar', text)] cannot operate on third argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT REPLACE('foo', 'bar', text) FROM test")); + } + + public void testInvalidTypeForSubString() { + assertEquals("1:8: first argument of [SUBSTRING(1, 2, 3)] must be [string], found value [1] type [integer]", + error("SELECT SUBSTRING(1, 2, 3)")); + assertEquals("1:8: [SUBSTRING(text, 2, 3)] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT SUBSTRING(text, 2, 3) FROM test")); + + assertEquals("1:8: second argument of [SUBSTRING('foo', 'bar', 3)] must be [integer], found value ['bar'] type [keyword]", + error("SELECT SUBSTRING('foo', 'bar', 3)")); + + assertEquals("1:8: third argument of [SUBSTRING('foo', 2, 'bar')] must be [integer], found value ['bar'] type [keyword]", + error("SELECT SUBSTRING('foo', 2, 'bar')")); } public void 
testInvalidTypeForFunction_WithFourArgs() { - assertEquals("1:8: [INSERT(1, 1, 2, 'new')] first argument must be [string], found value [1] type [integer]", + assertEquals("1:8: first argument of [INSERT(1, 1, 2, 'new')] must be [string], found value [1] type [integer]", error("SELECT INSERT(1, 1, 2, 'new')")); - assertEquals("1:8: [INSERT('text', 'foo', 2, 'new')] second argument must be [numeric], found value ['foo'] type [keyword]", + assertEquals("1:8: second argument of [INSERT('text', 'foo', 2, 'new')] must be [numeric], found value ['foo'] type [keyword]", error("SELECT INSERT('text', 'foo', 2, 'new')")); - assertEquals("1:8: [INSERT('text', 1, 'bar', 'new')] third argument must be [numeric], found value ['bar'] type [keyword]", + assertEquals("1:8: third argument of [INSERT('text', 1, 'bar', 'new')] must be [numeric], found value ['bar'] type [keyword]", error("SELECT INSERT('text', 1, 'bar', 'new')")); - assertEquals("1:8: [INSERT('text', 1, 2, 3)] fourth argument must be [string], found value [3] type [integer]", + assertEquals("1:8: fourth argument of [INSERT('text', 1, 2, 3)] must be [string], found value [3] type [integer]", error("SELECT INSERT('text', 1, 2, 3)")); } + + public void testInvalidTypeForRegexMatch() { + assertEquals("1:26: [text LIKE 'foo'] cannot operate on field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test WHERE text LIKE 'foo'")); + } public void testAllowCorrectFieldsInIncompatibleMappings() { assertNotNull(incompatibleAccept("SELECT languages FROM \"*\"")); @@ -616,32 +681,34 @@ public void testScalarOfHistogramNotInGrouping() { } public void testErrorMessageForPercentileWithSecondArgBasedOnAField() { - assertEquals("1:8: Second argument of PERCENTILE must be a constant, received [ABS(int)]", + assertEquals("1:8: second argument of [PERCENTILE(int, ABS(int))] must be a constant, received [ABS(int)]", error("SELECT PERCENTILE(int, ABS(int)) FROM test")); } public void testErrorMessageForPercentileRankWithSecondArgBasedOnAField() { - assertEquals("1:8: Second argument of PERCENTILE_RANK must be a constant, received [ABS(int)]", + assertEquals("1:8: second argument of [PERCENTILE_RANK(int, ABS(int))] must be a constant, received [ABS(int)]", error("SELECT PERCENTILE_RANK(int, ABS(int)) FROM test")); } public void testTopHitsFirstArgConstant() { - assertEquals("1:8: First argument of [FIRST] must be a table column, found constant ['foo']", + assertEquals("1:8: first argument of [FIRST('foo', int)] must be a table column, found constant ['foo']", error("SELECT FIRST('foo', int) FROM test")); } public void testTopHitsSecondArgConstant() { - assertEquals("1:8: Second argument of [LAST] must be a table column, found constant [10]", + assertEquals("1:8: second argument of [LAST(int, 10)] must be a table column, found constant [10]", error("SELECT LAST(int, 10) FROM test")); } public void testTopHitsFirstArgTextWithNoKeyword() { - assertEquals("1:8: [FIRST] cannot operate on first argument field of data type [text]", + assertEquals("1:8: [FIRST(text)] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", error("SELECT FIRST(text) FROM test")); } public void testTopHitsSecondArgTextWithNoKeyword() { - assertEquals("1:8: [LAST] cannot operate on second argument field of data type [text]", + assertEquals("1:8: [LAST(keyword, text)] cannot operate on second 
argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", error("SELECT LAST(keyword, text) FROM test")); } @@ -671,4 +738,4 @@ public void testAggregateAliasInFilter() { public void testProjectUnresolvedAliasInFilter() { assertEquals("1:8: Unknown column [tni]", error("SELECT tni AS i FROM test WHERE i > 10 GROUP BY i")); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index d0a3ce68b4b9e..f927f23864902 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; @@ -110,16 +109,6 @@ public void testTermEqualityAnalyzer() { assertEquals("value", tq.value()); } - public void testTermEqualityAnalyzerAmbiguous() { - LogicalPlan p = plan("SELECT some.string FROM test WHERE some.ambiguous = 'value'"); - assertTrue(p instanceof Project); - p = ((Project) p).child(); - assertTrue(p instanceof Filter); - Expression condition = ((Filter) p).condition(); - // the message is checked elsewhere (in FieldAttributeTests) - expectThrows(MappingException.class, () -> QueryTranslator.toQuery(condition, false)); - } - public void testTermEqualityNotAnalyzed() { LogicalPlan p = plan("SELECT some.string FROM test WHERE int = 5"); assertTrue(p instanceof Project); @@ -640,7 +629,7 @@ public void testTopHitsAggregationWithOneArg() { EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("FIRST(keyword)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.KEYWORD); + assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType()); assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," + @@ -652,7 +641,7 @@ public void testTopHitsAggregationWithOneArg() { EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("LAST(date)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.DATETIME); + assertEquals(DataType.DATETIME, eqe.output().get(0).dataType()); assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"date\",\"format\":\"epoch_millis\"}]," + @@ -667,7 +656,7 @@ public void testTopHitsAggregationWithTwoArgs() { EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("FIRST(keyword, int)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.KEYWORD); 
+            assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType());
             assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""),
                 endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," +
                         "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," +
@@ -681,7 +670,7 @@ public void testTopHitsAggregationWithTwoArgs() {
             EsQueryExec eqe = (EsQueryExec) p;
             assertEquals(1, eqe.output().size());
             assertEquals("LAST(date, int)", eqe.output().get(0).qualifiedName());
-            assertTrue(eqe.output().get(0).dataType() == DataType.DATETIME);
+            assertEquals(DataType.DATETIME, eqe.output().get(0).dataType());
             assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""),
                 endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," +
                         "\"explain\":false,\"docvalue_fields\":[{\"field\":\"date\",\"format\":\"epoch_millis\"}]," +
From 185054c3d176639dd5984911d4f30d4014473342 Mon Sep 17 00:00:00 2001
From: Andrei Stefan
Date: Fri, 1 Mar 2019 15:23:15 +0200
Subject: [PATCH 10/39] SQL: ignore UNSUPPORTED fields for JDBC and ODBC modes
 in 'SYS COLUMNS' (#39518)

* SYS COLUMNS will skip UNSUPPORTED field types in both ODBC and JDBC mode.
  NESTED and OBJECT types were already skipped in ODBC mode; now they are
  skipped in JDBC mode as well.

(cherry picked from commit 9e0df64b2d36c9069dfa506570468f0522c86417)
---
 docs/reference/sql/limitations.asciidoc       |   4 +-
 .../plan/logical/command/sys/SysColumns.java  |  16 +-
 .../xpack/sql/type/DataType.java              |   2 +-
 .../logical/command/sys/SysColumnsTests.java  | 251 +++++++++++++++++-
 .../mapping-multi-field-variation.json        |   3 +-
 5 files changed, 250 insertions(+), 26 deletions(-)

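Before the diff itself, a rough illustration of the rule the commit message describes. This is a reduced, hypothetical model: Mode and FieldType below are cut-down stand-ins for the real Mode and DataType enums, and only the visibility decision is shown, not the real SysColumns code. (The static Mode::isDriver helper does exist; the tests below reference it.)

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class SysColumnsModeSketch {

    enum Mode {
        CLI, PLAIN, JDBC, ODBC;
        static boolean isDriver(Mode mode) {
            return mode == JDBC || mode == ODBC;
        }
    }

    enum FieldType {
        KEYWORD(true), INTEGER(true), NESTED(false), OBJECT(false), UNSUPPORTED(false);
        final boolean primitive;
        FieldType(boolean primitive) {
            this.primitive = primitive;
        }
    }

    // A column is listed when its type maps to a concrete SQL type, or when the
    // client is not a driver (CLI and REST keep NESTED/OBJECT/UNSUPPORTED visible).
    static boolean listed(Mode mode, FieldType type) {
        return type.primitive || Mode.isDriver(mode) == false;
    }

    public static void main(String[] args) {
        List<FieldType> mapping = Arrays.asList(FieldType.KEYWORD, FieldType.NESTED, FieldType.UNSUPPORTED);
        System.out.println(mapping.stream().filter(t -> listed(Mode.JDBC, t)).collect(Collectors.toList())); // [KEYWORD]
        System.out.println(mapping.stream().filter(t -> listed(Mode.CLI, t)).collect(Collectors.toList()));  // [KEYWORD, NESTED, UNSUPPORTED]
    }
}

This matches the expectations in the updated tests: the non-driver test below sees 17 rows for the sample mapping, while the ODBC and JDBC tests see 13.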
diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc
index 71cff86f1e712..7224ca8830af0 100644
--- a/docs/reference/sql/limitations.asciidoc
+++ b/docs/reference/sql/limitations.asciidoc
@@ -7,8 +7,8 @@
 === Nested fields in `SYS COLUMNS` and `DESCRIBE TABLE`
 
 {es} has a special type of relationship fields called `nested` fields. In {es-sql} they can be used by referencing their inner
-sub-fields. Even though `SYS COLUMNS` and `DESCRIBE TABLE` will still display them as having the type `NESTED`, they cannot
-be used in a query. One can only reference its sub-fields in the form:
+sub-fields. Even though `SYS COLUMNS` in non-driver mode (in the CLI and in REST calls) and `DESCRIBE TABLE` will still display
+them as having the type `NESTED`, they cannot be used in a query. One can only reference its sub-fields in the form:
 
 [source, sql]
 --------------------------------------------------
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java
index ffe0140c9dcaa..e5c80197296c2 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java
@@ -96,8 +96,8 @@ private List<Attribute> output(boolean odbcCompatible) {
 
     @Override
     public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
-        boolean isOdbcClient = session.configuration().mode() == Mode.ODBC;
-        List<Attribute> output = output(isOdbcClient);
+        Mode mode = session.configuration().mode();
+        List<Attribute> output = output(mode == Mode.ODBC);
         String cluster = session.indexResolver().clusterName();
 
         // bail-out early if the catalog is present but differs
@@ -114,7 +114,7 @@ public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
         session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> {
             List<List<?>> rows = new ArrayList<>();
             for (EsIndex esIndex : esIndices) {
-                fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, isOdbcClient);
+                fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode);
             }
 
             listener.onResponse(Rows.of(output, rows));
@@ -122,8 +122,9 @@ static void fillInRows(String clusterName, String indexName, Map<String, EsField> mapping, String prefix, List<List<?>> rows,
-            Pattern columnMatcher, boolean isOdbcClient) {
+            Pattern columnMatcher, Mode mode) {
         int pos = 0;
+        boolean isOdbcClient = mode == Mode.ODBC;
         for (Map.Entry<String, EsField> entry : mapping.entrySet()) {
             pos++; // JDBC is 1-based so we start with 1 here
@@ -132,9 +133,8 @@ static void fillInRows(String clusterName, String indexName, Map<String, EsField> mapping, String prefix,
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java
         List<List<?>> rows = new ArrayList<>();
-        SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, false);
-        assertEquals(16, rows.size());
+        SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null,
+                randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values())));
+        assertEquals(17, rows.size());
         assertEquals(24, rows.get(0).size());
 
         List<?> row = rows.get(0);
@@ -37,6 +39,12 @@ public void testSysColumns() {
         assertEquals(Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
         assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(3);
+        assertEquals("keyword", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
 
         row = rows.get(4);
         assertEquals("date", name(row));
@@ -45,23 +53,84 @@ public void testSysColumns() {
         assertEquals(24, precision(row));
         assertEquals(8, bufferLength(row));
 
+        row = rows.get(5);
+        assertEquals("unsupported", name(row));
+        assertEquals(Types.OTHER, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(0, bufferLength(row));
+
+        row = rows.get(6);
+        assertEquals("some", name(row));
+        assertEquals(Types.STRUCT, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(-1, bufferLength(row));
+
         row = rows.get(7);
         assertEquals("some.dotted", name(row));
         assertEquals(Types.STRUCT, sqlType(row));
         assertEquals(null, 
radix(row));
         assertEquals(-1, bufferLength(row));
+
+        row = rows.get(8);
+        assertEquals("some.dotted.field", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(9);
+        assertEquals("some.string", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(10);
+        assertEquals("some.string.normalized", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(11);
+        assertEquals("some.string.typical", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(12);
+        assertEquals("some.ambiguous", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(13);
+        assertEquals("some.ambiguous.one", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(14);
+        assertEquals("some.ambiguous.two", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
 
         row = rows.get(15);
         assertEquals("some.ambiguous.normalized", name(row));
         assertEquals(Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
         assertEquals(Integer.MAX_VALUE, bufferLength(row));
+
+        row = rows.get(16);
+        assertEquals("foo_type", name(row));
+        assertEquals(Types.OTHER, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(0, bufferLength(row));
     }
 
     public void testSysColumnsInOdbcMode() {
         List<List<?>> rows = new ArrayList<>();
-        SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, true);
-        assertEquals(14, rows.size());
+        SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null,
+                Mode.ODBC);
+        assertEquals(13, rows.size());
         assertEquals(24, rows.get(0).size());
 
         List<?> row = rows.get(0);
@@ -112,18 +181,17 @@ public void testSysColumnsInOdbcMode() {
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
 
         row = rows.get(5);
-        assertEquals("unsupported", name(row));
-        assertEquals((short) Types.OTHER, sqlType(row));
+        assertEquals("some.dotted.field", name(row));
+        assertEquals((short) Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
-        assertEquals(0, precision(row));
-        assertEquals(0, bufferLength(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
         assertNull(decimalPrecision(row));
         assertEquals(Short.class, nullable(row).getClass());
         assertEquals(Short.class, sqlDataType(row).getClass());
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
 
         row = rows.get(6);
-        assertEquals("some.dotted.field", name(row));
+        assertEquals("some.string", name(row));
         assertEquals((short) Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
         assertEquals(Integer.MAX_VALUE, bufferLength(row));
@@ -133,7 +201,7 @@ public void testSysColumnsInOdbcMode() {
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
 
         row = rows.get(7);
-        assertEquals("some.string", name(row));
+        assertEquals("some.string.normalized", name(row));
         assertEquals((short) Types.VARCHAR, 
sqlType(row));
         assertEquals(null, radix(row));
         assertEquals(Integer.MAX_VALUE, bufferLength(row));
@@ -143,7 +211,7 @@ public void testSysColumnsInOdbcMode() {
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
 
         row = rows.get(8);
-        assertEquals("some.string.normalized", name(row));
+        assertEquals("some.string.typical", name(row));
         assertEquals((short) Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
         assertEquals(Integer.MAX_VALUE, bufferLength(row));
@@ -151,9 +219,29 @@ public void testSysColumnsInOdbcMode() {
         assertEquals(Short.class, nullable(row).getClass());
         assertEquals(Short.class, sqlDataType(row).getClass());
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
-
+
         row = rows.get(9);
-        assertEquals("some.string.typical", name(row));
+        assertEquals("some.ambiguous", name(row));
+        assertEquals((short) Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+        assertNull(decimalPrecision(row));
+        assertEquals(Short.class, nullable(row).getClass());
+        assertEquals(Short.class, sqlDataType(row).getClass());
+        assertEquals(Short.class, sqlDataTypeSub(row).getClass());
+
+        row = rows.get(10);
+        assertEquals("some.ambiguous.one", name(row));
+        assertEquals((short) Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+        assertNull(decimalPrecision(row));
+        assertEquals(Short.class, nullable(row).getClass());
+        assertEquals(Short.class, sqlDataType(row).getClass());
+        assertEquals(Short.class, sqlDataTypeSub(row).getClass());
+
+        row = rows.get(11);
+        assertEquals("some.ambiguous.two", name(row));
         assertEquals((short) Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
         assertEquals(Integer.MAX_VALUE, bufferLength(row));
@@ -162,7 +250,7 @@ public void testSysColumnsInOdbcMode() {
         assertEquals(Short.class, sqlDataType(row).getClass());
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
 
-        row = rows.get(13);
+        row = rows.get(12);
         assertEquals("some.ambiguous.normalized", name(row));
         assertEquals((short) Types.VARCHAR, sqlType(row));
         assertEquals(null, radix(row));
@@ -172,6 +260,141 @@ public void testSysColumnsInOdbcMode() {
         assertEquals(Short.class, sqlDataType(row).getClass());
         assertEquals(Short.class, sqlDataTypeSub(row).getClass());
     }
+
+    public void testSysColumnsInJdbcMode() {
+        List<List<?>> rows = new ArrayList<>();
+        SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null,
+                Mode.JDBC);
+        assertEquals(13, rows.size());
+        assertEquals(24, rows.get(0).size());
+
+        List<?> row = rows.get(0);
+        assertEquals("bool", name(row));
+        assertEquals(Types.BOOLEAN, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(1, bufferLength(row));
+
+        row = rows.get(1);
+        assertEquals("int", name(row));
+        assertEquals(Types.INTEGER, sqlType(row));
+        assertEquals(Integer.class, radix(row).getClass());
+        assertEquals(4, bufferLength(row));
+        assertNull(decimalPrecision(row));
+        assertEquals(Integer.class, nullable(row).getClass());
+        assertEquals(Integer.class, sqlDataType(row).getClass());
+        assertEquals(Integer.class, sqlDataTypeSub(row).getClass());
+
+        row = rows.get(2);
+        assertEquals("text", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+        assertNull(decimalPrecision(row));
+        assertEquals(Integer.class, nullable(row).getClass());
+        assertEquals(Integer.class, 
sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(3); + assertEquals("keyword", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(4); + assertEquals("date", name(row)); + assertEquals(Types.TIMESTAMP, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(24, precision(row)); + assertEquals(8, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(5); + assertEquals("some.dotted.field", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(6); + assertEquals("some.string", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(7); + assertEquals("some.string.normalized", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(8); + assertEquals("some.string.typical", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(9); + assertEquals("some.ambiguous", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(10); + assertEquals("some.ambiguous.one", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(11); + assertEquals("some.ambiguous.two", name(row)); + 
assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+        assertNull(decimalPrecision(row));
+        assertEquals(Integer.class, nullable(row).getClass());
+        assertEquals(Integer.class, sqlDataType(row).getClass());
+        assertEquals(Integer.class, sqlDataTypeSub(row).getClass());
+
+        row = rows.get(12);
+        assertEquals("some.ambiguous.normalized", name(row));
+        assertEquals(Types.VARCHAR, sqlType(row));
+        assertEquals(null, radix(row));
+        assertEquals(Integer.MAX_VALUE, bufferLength(row));
+        assertNull(decimalPrecision(row));
+        assertEquals(Integer.class, nullable(row).getClass());
+        assertEquals(Integer.class, sqlDataType(row).getClass());
+        assertEquals(Integer.class, sqlDataTypeSub(row).getClass());
+    }
 
     private static Object name(List<?> list) {
         return list.get(3);
diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json
index 13c9f62b2136e..d93633f7aced0 100644
--- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json
+++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json
@@ -43,6 +43,7 @@
                 }
             }
         }
-    }
+    },
+    "foo_type" : { "type" : "foo" }
   }
 }
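With the patch applied, the effect is visible through standard JDBC metadata, since DatabaseMetaData#getColumns is served by SYS COLUMNS. A hedged usage sketch follows; the connection URL and index name are placeholders, and it assumes the Elasticsearch SQL JDBC driver is on the classpath:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ListColumnsExample {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; adjust host/port for your cluster.
        try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200")) {
            DatabaseMetaData meta = con.getMetaData();
            // null catalog, any schema, table "test", any column name
            try (ResultSet columns = meta.getColumns(null, "%", "test", "%")) {
                while (columns.next()) {
                    // After this patch, driver mode should only return concrete SQL-typed
                    // columns here; OBJECT, NESTED and UNSUPPORTED fields are skipped.
                    System.out.println(columns.getString("COLUMN_NAME") + " -> " + columns.getString("TYPE_NAME"));
                }
            }
        }
    }
}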
From 19764418ba10a6f36443b9443ffaeae29d599ce5 Mon Sep 17 00:00:00 2001
From: Andrei Stefan
Date: Fri, 1 Mar 2019 15:23:38 +0200
Subject: [PATCH 11/39] Removed custom naming for DISTINCT COUNT (#39537)

(cherry picked from commit 9412a2ee01a60dd6449bbced1273ec0b37b65589)
---
 x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec     |  4 ++++
 .../xpack/sql/expression/function/aggregate/Count.java   | 10 ----------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec
index 7406ea488308d..9ed20b1cbc19f 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec
+++ b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec
@@ -89,6 +89,8 @@ SELECT (emp_no % 3) + 1 AS e, (languages % 3) + 1 AS l FROM test_emp GROUP BY e,
 
 // COUNT
 aggCountImplicit
+SELECT COUNT(*) FROM test_emp;
+aggCountImplicitAlias
 SELECT COUNT(*) AS count FROM test_emp;
 aggCountImplicitWithCast
 SELECT CAST(COUNT(*) AS INT) c FROM "test_emp";
@@ -109,6 +111,8 @@ SELECT gender g, CAST(COUNT(*) AS INT) c FROM "test_emp" WHERE emp_no < 10020 GR
 aggCountWithAlias
 SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g ORDER BY gender;
 countDistinct
+SELECT COUNT(DISTINCT "hire_date") FROM test_emp;
+countDistinctAlias
 SELECT COUNT(DISTINCT hire_date) AS count FROM test_emp;
 countDistinctAndCountSimpleWithAlias
 SELECT COUNT(*) cnt, COUNT(DISTINCT first_name) as names, gender FROM test_emp GROUP BY gender ORDER BY gender;
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java
index 95a1b50cc1139..236cf105a4c80 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java
@@ -63,16 +63,6 @@ public String functionId() {
         return functionId;
     }
 
-    @Override
-    public String name() {
-        if (distinct()) {
-            StringBuilder sb = new StringBuilder(super.name());
-            sb.insert(sb.indexOf("(") + 1, "DISTINCT ");
-            return sb.toString();
-        }
-        return super.name();
-    }
-
     @Override
     public AggregateFunctionAttribute toAttribute() {
         // COUNT(*) gets its value from the parent aggregation on which _count is called
From f14405a41ca68463f063b3a0f397073d53f84a69 Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Fri, 1 Mar 2019 17:33:44 +0100
Subject: [PATCH 12/39] Make `_doc` work as an alias of the actual type of an
 index. (#39505)

This is similar to the work that has been done on 7.x to make typeless
API calls work on indices that have types, except that this commit
doesn't introduce typeless calls, e.g. the REST API spec or REST
handlers haven't been updated. It only affects the get, index, update,
delete and bulk APIs. Other APIs that require types such as explain or
termvectors are left unchanged.

This is necessary to allow for rolling upgrades from 6.7 to 7.x while
internal indices might remain queried during upgrade by nodes that are
on either version.

Closes #39469
---
 .../test/bulk/70_mix_typeless_typeful.yml     |  37 ++++++
 .../test/delete/70_mix_typeless_typeful.yml   |  44 ++++++++
 .../test/get/100_mix_typeless_typeful.yml     |  48 ++++++++
 .../test/index/70_mix_typeless_typeful.yml    | 105 ++++++++++++++++++
 .../{70_bwc_date.yml => 80_bwc_date.yml}      |   0
 .../20_mix_typeless_typeful.yml               |  15 ++-
 .../test/update/90_mix_typeless_typeful.yml   |  88 +++++++++++++++
 .../action/bulk/TransportBulkAction.java      |   3 +-
 .../action/bulk/TransportShardBulkAction.java |   2 +-
 .../cluster/metadata/IndexMetaData.java       |  23 ++++
 .../cluster/metadata/MetaData.java            |   2 +-
 .../metadata/MetaDataMappingService.java      |  24 +---
 .../index/get/ShardGetService.java            |   2 +-
 .../index/mapper/DocumentMapperParser.java    |   2 +-
 .../index/mapper/MapperService.java           |  26 +++++
 .../elasticsearch/index/shard/IndexShard.java |  24 +++-
 .../cluster/metadata/IndexMetaDataTests.java  |  43 +++++++
 .../index/mapper/MapperServiceTests.java      |  56 ++++++++++
 .../index/shard/IndexShardIT.java             |  43 +++++++
 .../index/shard/IndexShardTests.java          |  64 ++++++++++-
 .../index/translog/TestTranslog.java          |  14 +++
 .../elasticsearch/indexing/IndexActionIT.java |  37 ++++++
 22 files changed, 669 insertions(+), 33 deletions(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml
 rename rest-api-spec/src/main/resources/rest-api-spec/test/index/{70_bwc_date.yml => 80_bwc_date.yml} (100%)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml
new file mode 100644
index 0000000000000..4739d0240f596
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml
@@ -0,0 +1,37 @@
+---
+"bulk without types on an index that has types":
+
+  - skip:
+      version: " - 6.6.99"
+      reason: Typeless APIs were introduced in 6.7.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+          include_type_name: true
+          index: index
+          body:
+              mappings:
+                  not_doc:
+                      properties:
+                          foo:
+                              type: "keyword"
+
+  - do:
+      bulk:
+          refresh: true
+          body:
+              - index:
+                  _index: index
+                  _type: _doc
+                  _id: 0
+              - foo: bar
+              - index:
+                  _index: 
index + _type: _doc + _id: 1 + - foo: bar + + - do: + count: + index: index + + - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml new file mode 100644 index 0000000000000..c33da6a889b8d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml @@ -0,0 +1,44 @@ +--- +"DELETE with typeless API on an index that has types": + + - skip: + version: " - 6.6.99" + reason: Typeless APIs were introduced in 6.7.0 + + - do: + indices.create: # not using include_type_name: false on purpose + include_type_name: true + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + catch: bad_request + delete: + index: index + type: some_random_type + id: 1 + + - match: { error.root_cause.0.reason: "/Rejecting.mapping.update.to.\\[index\\].as.the.final.mapping.would.have.more.than.1.type.*/" } + + - do: + delete: + index: index + type: _doc + id: 1 + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "1"} + - match: { _version: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml new file mode 100644 index 0000000000000..07b7d324f6eb2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml @@ -0,0 +1,48 @@ +--- +"GET with typeless API on an index that has types": + + - skip: + version: " - 6.6.99" + reason: Typeless APIs were introduced in 6.7.0 + + - do: + indices.create: # not using include_type_name: false on purpose + include_type_name: true + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + catch: missing + get: + index: index + type: some_random_type + id: 1 + + - match: { _index: "index" } + - match: { _type: "some_random_type" } + - match: { _id: "1"} + - match: { found: false} + + - do: + get: + index: index + type: _doc + id: 1 + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "1"} + - match: { _version: 1} + - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml new file mode 100644 index 0000000000000..359658fac2847 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml @@ -0,0 +1,105 @@ +--- +"Index with typeless API on an index that has types": + + - skip: + version: " - 6.6.99" + reason: Typeless APIs were introduced in 6.7.0 + + - do: + indices.create: # not using include_type_name: false on purpose + include_type_name: true + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: _doc + id: 1 + body: { foo: bar } + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "1"} + - match: { _version: 1} + + - do: + get: # not using typeless API on purpose + index: index + type: not_doc + id: 1 + + - match: { _index: "index" } + - match: { _type: "not_doc" } # the important bit to check + - match: { _id: "1"} + - 
match: { _version: 1} + - match: { _source: { foo: bar }} + + + - do: + index: + index: index + type: _doc + body: { foo: bar } + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _version: 1} + - set: { _id: id } + + - do: + get: # using typeful API on purpose + index: index + type: not_doc + id: '$id' + + - match: { _index: "index" } + - match: { _type: "not_doc" } # the important bit to check + - match: { _id: $id} + - match: { _version: 1} + - match: { _source: { foo: bar }} + +--- +"Index call that introduces new field mappings": + + - skip: + version: " - 6.6.99" + reason: Typeless APIs were introduced in 6.7.0 + + - do: + indices.create: # not using include_type_name: false on purpose + include_type_name: true + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + - do: + index: + index: index + type: _doc + id: 2 + body: { new_field: value } + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "2" } + - match: { _version: 1 } + + - do: + get: # using typeful API on purpose + index: index + type: not_doc + id: 2 + + - match: { _index: "index" } + - match: { _type: "not_doc" } + - match: { _id: "2" } + - match: { _version: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_bwc_date.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_bwc_date.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/index/70_bwc_date.yml rename to rest-api-spec/src/main/resources/rest-api-spec/test/index/80_bwc_date.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml index e443ed1393594..a8522285a091b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -121,9 +121,22 @@ type: keyword - do: - catch: /the final mapping would have more than 1 type/ index: index: test-1 type: _doc body: { bar: 42 } +# This cluster health call guarantees that changes are visible to the get-mappings API + - do: + cluster.health: + wait_for_events: normal + + - do: + indices.get_mapping: + include_type_name: true + index: test-1 + + - is_true: test-1.mappings.my_type # the template is honored + - is_false: test-1.mappings._doc + - is_true: test-1.mappings.my_type.properties.foo + - is_true: test-1.mappings.my_type.properties.bar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml new file mode 100644 index 0000000000000..cce88aa087c3b --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml @@ -0,0 +1,88 @@ +--- +"Update with typeless API on an index that has types": + + - skip: + version: " - 6.6.99" + reason: Typeless APIs were introduced in 6.7.0 + + - do: + indices.create: # not using include_type_name: false on purpose + include_type_name: true + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + update: + index: index + type: _doc + id: 1 + body: + doc: + foo: baz + + - do: + get: + index: index + type: not_doc + id: 1 + + - match: { _source.foo: baz } 
+ +--- +"Update call that introduces new field mappings": + + - skip: + version: " - 6.7.99" + reason: Typeless APIs were introduced in 6.7.0 + + - do: + indices.create: # not using include_type_name: false on purpose + include_type_name: true + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + update: + index: index + type: _doc + id: 1 + body: + doc: + foo: baz + new_field: value + - do: + get: # using typeful API on purpose + index: index + type: not_doc + id: 1 + + - match: { _index: "index" } + - match: { _type: "not_doc" } + - match: { _id: "1" } + - match: { _version: 2} + - match: { _source.foo: baz } + - match: { _source.new_field: value } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 54ed633612701..3bfbc18dca76b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -355,7 +355,8 @@ protected void doRun() throws Exception { case INDEX: IndexRequest indexRequest = (IndexRequest) docWriteRequest; final IndexMetaData indexMetaData = metaData.index(concreteIndex); - MappingMetaData mappingMd = indexMetaData.mappingOrDefault(indexRequest.type()); + MappingMetaData mappingMd = indexMetaData.mappingOrDefault( + indexMetaData.resolveDocumentType(indexRequest.type())); Version indexCreated = indexMetaData.getCreationVersion(); indexRequest.resolveRouting(metaData); indexRequest.process(indexCreated, mappingMd, concreteIndex.getName()); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index ebda9bb11edfa..09fdcd652dedf 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -189,7 +189,7 @@ static void executeBulkItemRequest(BulkPrimaryExecutionContext context, UpdateHe case UPDATED: IndexRequest indexRequest = updateResult.action(); IndexMetaData metaData = context.getPrimary().indexSettings().getIndexMetaData(); - MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + MappingMetaData mappingMd = metaData.mappingOrDefault(metaData.resolveDocumentType(indexRequest.type())); indexRequest.process(metaData.getCreationVersion(), mappingMd, updateRequest.concreteIndex()); context.setRequestToExecute(indexRequest); break; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 524a8c3197748..341de0fd0d2c2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -498,6 +498,29 @@ public MappingMetaData mappingOrDefault(String mappingType) { return mappings.get(MapperService.DEFAULT_MAPPING); } + /** + * Resolves a type from a mapping-related request into the type that should be used when + * merging and updating mappings. + * + * If the special `_doc` type is provided, then we replace it with the actual type that is + * being used in the mappings. 
This allows typeless APIs such as 'index' or 'put mappings'
+     * to work against indices with a custom type name.
+     */
+    public String resolveDocumentType(String type) {
+        if (MapperService.SINGLE_MAPPING_NAME.equals(type) &&
+                mappings.containsKey(type) == false &&
+                getCreationVersion().onOrAfter(Version.V_6_0_0)) {
+            // If the type is _doc and we have a 6.x index, then _doc is an alias
+            // for the actual type of the index (if any)
+            for (ObjectCursor<String> cursor : mappings.keys()) {
+                if (cursor.value.equals(MapperService.DEFAULT_MAPPING) == false) {
+                    return cursor.value;
+                }
+            }
+        }
+        return type;
+    }
+
     ImmutableOpenMap<String, DiffableStringMap> getCustomData() {
         return this.customData;
     }
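To make the resolution rule above easy to try in isolation, here is a self-contained sketch that mirrors it. The Set stands in for the index's mapping keys and the creation-version check is omitted, so treat this as an illustration rather than the real IndexMetaData code:

import java.util.LinkedHashSet;
import java.util.Set;

public class DocAliasResolutionSketch {

    static final String SINGLE_MAPPING_NAME = "_doc";
    static final String DEFAULT_MAPPING = "_default_";

    static String resolveDocumentType(Set<String> mappingTypes, String type) {
        if (SINGLE_MAPPING_NAME.equals(type) && mappingTypes.contains(type) == false) {
            // _doc acts as an alias for the actual type of the index (if any);
            // _default_ is not a concrete type and is never a candidate
            for (String candidate : mappingTypes) {
                if (DEFAULT_MAPPING.equals(candidate) == false) {
                    return candidate;
                }
            }
        }
        return type;
    }

    public static void main(String[] args) {
        Set<String> mappings = new LinkedHashSet<>();
        mappings.add("_default_");
        mappings.add("my_type");
        System.out.println(resolveDocumentType(mappings, "_doc"));    // my_type
        System.out.println(resolveDocumentType(mappings, "my_type")); // my_type
        System.out.println(resolveDocumentType(new LinkedHashSet<>(), "_doc")); // _doc
    }
}

The same rule is duplicated in MapperService below (with the mapper map instead of the metadata mappings), and the IndexMetaDataTests added at the end of this patch assert exactly these cases.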
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 94200222ebd4b..d13abb136ed95 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -707,7 +707,7 @@ public static boolean isExplicitAllType(String[] types) {
     public boolean routingRequired(String concreteIndex, String type) {
         IndexMetaData indexMetaData = indices.get(concreteIndex);
         if (indexMetaData != null) {
-            MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type);
+            MappingMetaData mappingMetaData = indexMetaData.getMappings().get(indexMetaData.resolveDocumentType(type));
             if (mappingMetaData != null) {
                 return mappingMetaData.routing().required();
             }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index b9e8a40e32c1d..6d635d0fd5798 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -23,7 +23,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@@ -51,7 +50,6 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -242,21 +240,6 @@ class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
-        private DocumentMapper getMapperForUpdate(MapperService mapperService, String mappingType) {
-            DocumentMapper mapper = mapperService.documentMapper(mappingType);
-            if (mapper == null) {
-                Iterator<DocumentMapper> docMappersIt = mapperService.docMappers(false).iterator();
-                if (docMappersIt.hasNext()) {
-                    mapper = docMappersIt.next();
-                }
-                if (docMappersIt.hasNext()) {
-                    throw new AssertionError("Index has multiple types: " + mapperService.types());
-                }
-            }
-            return mapper;
-        }
-
         private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
                                           Map<Index, MapperService> indexMapperServices) throws IOException {
             String mappingType = request.type();
@@ -276,7 +259,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt
             DocumentMapper newMapper;
             DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
             if (existingMapper == null && isMappingSourceTyped(request.type(), mappingUpdateSource) == false) {
-                existingMapper = getMapperForUpdate(mapperService, mappingType);
+                existingMapper = mapperService.documentMapper(mapperService.resolveDocumentType(mappingType));
             }
             String typeForUpdate = existingMapper == null ? mappingType : existingMapper.type();
@@ -310,7 +293,8 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt
                 }
                 if (mappingType == null) {
                     mappingType = newMapper.type();
-                } else if (mappingType.equals(newMapper.type()) == false) {
+                } else if (mappingType.equals(newMapper.type()) == false
+                        && mapperService.resolveDocumentType(mappingType).equals(newMapper.type()) == false) {
                     throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
                 }
             }
@@ -337,7 +321,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt
                 CompressedXContent existingSource = null;
                 DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
                 if (existingMapper == null && isMappingSourceTyped(request.type(), mappingUpdateSource) == false) {
-                    existingMapper = getMapperForUpdate(mapperService, mappingType);
+                    existingMapper = mapperService.documentMapper(mapperService.resolveDocumentType(mappingType));
                 }
                 if (existingMapper != null) {
                     typeForUpdate = existingMapper.type();
diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
index ad3786541139f..7e507a2b2ab54 100644
--- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
+++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
@@ -216,7 +216,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[]
             }
         }
 
-        DocumentMapper docMapper = mapperService.documentMapper(type);
+        DocumentMapper docMapper = mapperService.documentMapper(mapperService.resolveDocumentType(type));
 
         if (docMapper.parentFieldMapper().active()) {
             String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.reader,
                 docIdAndVersion.docId);
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
index e63d5a279f3cd..e388dd7ebcd00 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
@@ -197,7 +197,7 @@ private Tuple<String, Map<String, Object>> extractMapping(String type, Map<String, Object> root) {
         Tuple<String, Map<String, Object>> mapping;
-        if (type == null || type.equals(rootName)) {
+        if (type == null || type.equals(rootName) || mapperService.resolveDocumentType(type).equals(rootName)) {
             mapping = new Tuple<>(rootName, (Map<String, Object>) root.get(rootName));
         } else {
             mapping = new Tuple<>(type, root);
         }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index 0695bb341b710..6138287da084e 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -709,6 +709,29 @@ public DocumentMapper documentMapper(String type) {
         return mappers.get(type);
     }
 
+    /**
+     * Resolves a type from a mapping-related request into the type that should be used when
+     * merging and updating mappings.
+     *
+     * If the special `_doc` type is provided, then we replace it with the actual type that is
+     * being used in the mappings. This allows typeless APIs such as 'index' or 'put mappings'
+     * to work against indices with a custom type name. 
+ */ + public String resolveDocumentType(String type) { + if (MapperService.SINGLE_MAPPING_NAME.equals(type) && + mappers.containsKey(type) == false && + indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0)) { + // If the type is _doc and we have a 6.x index, then _doc is an alias + // for the actual type of the index (if any) + for (String t : mappers.keySet()) { + if (t.equals(DEFAULT_MAPPING) == false) { + return t; + } + } + } + return type; + } + /** * Returns the document mapper created, including a mapping update if the * type has been dynamically created. @@ -858,9 +881,12 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { /** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */ public Term createUidTerm(String type, String id) { + type = resolveDocumentType(type); + if (hasMapping(type) == false) { return null; } + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_beta1)) { assert indexSettings.isSingleType(); return new Term(IdFieldMapper.NAME, Uid.encodeId(id)); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index ef861e9ac9c66..b6282ce905357 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -760,7 +760,17 @@ private Engine.IndexResult applyIndexOperation(Engine engine, long seqNo, long o ensureWriteAllowed(origin); Engine.Index operation; try { - operation = prepareIndex(docMapper(sourceToParse.type()), indexSettings.getIndexVersionCreated(), sourceToParse, seqNo, + final String resolvedType = mapperService.resolveDocumentType(sourceToParse.type()); + final SourceToParse sourceWithResolvedType; + if (resolvedType.equals(sourceToParse.type())) { + sourceWithResolvedType = sourceToParse; + } else { + sourceWithResolvedType = SourceToParse.source(sourceToParse.index(), resolvedType, sourceToParse.id(), + sourceToParse.source(), sourceToParse.getXContentType()) + .routing(sourceToParse.routing()) + .parent(sourceToParse.parent()); + } + operation = prepareIndex(docMapper(resolvedType), indexSettings.getIndexVersionCreated(), sourceWithResolvedType, seqNo, opPrimaryTerm, version, versionType, origin, autoGeneratedTimeStamp, isRetry, ifSeqNo, ifPrimaryTerm); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); @@ -869,6 +879,7 @@ private Engine.DeleteResult applyDeleteOperation(Engine engine, long seqNo, long assert opPrimaryTerm <= getOperationPrimaryTerm() : "op term [ " + opPrimaryTerm + " ] > shard term [" + getOperationPrimaryTerm() + "]"; assert versionType.validateVersionForWrites(version); + ensureWriteAllowed(origin); if (indexSettings().isSingleType()) { // When there is a single type, the unique identifier is only composed of the _id, @@ -893,11 +904,12 @@ private Engine.DeleteResult applyDeleteOperation(Engine engine, long seqNo, long return delete(engine, delete); } - private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, + private Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType, Engine.Operation.Origin origin, long ifSeqNo, long ifPrimaryTerm) { long startTime = System.nanoTime(); - return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime, ifSeqNo, ifPrimaryTerm); + return new 
Engine.Delete(mapperService.resolveDocumentType(type), id, uid, seqNo, primaryTerm, version, versionType, origin, + startTime, ifSeqNo, ifPrimaryTerm); } private Term extractUidForDelete(String type, String id) { @@ -935,6 +947,10 @@ private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws I public Engine.GetResult get(Engine.Get get) { readAllowed(); + String resolvedType = mapperService.resolveDocumentType(get.type()); + if (mapperService.hasMapping(resolvedType) == false) { + return Engine.GetResult.NOT_EXISTS; + } return getEngine().get(get, this::acquireSearcher); } @@ -2492,7 +2508,7 @@ private static void persistMetadata( } private DocumentMapperForType docMapper(String type) { - return mapperService.documentMapperWithAutoCreate(type); + return mapperService.documentMapperWithAutoCreate(mapperService.resolveDocumentType(type)); } private EngineConfig newEngineConfig() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index 1fdea596afbf9..400cb4895db03 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; @@ -287,4 +288,46 @@ public void testNumberOfRoutingShards() { () -> IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(notAFactorySettings)); assertEquals("the number of source shards [2] must be a factor of [3]", iae.getMessage()); } + + public void testResolveDocumentType() throws IOException { + Settings settings = Settings.builder() + .put("index.number_of_shards", 5) + .put("index.number_of_replicas", 1) + .put("index.version.created", Version.CURRENT).build(); + + IndexMetaData emptyMetaData = IndexMetaData.builder("index") + .settings(settings) + .build(); + assertEquals("_doc", emptyMetaData.resolveDocumentType("_doc")); + assertEquals("my_type", emptyMetaData.resolveDocumentType("my_type")); + assertEquals("_default_", emptyMetaData.resolveDocumentType("_default_")); + + IndexMetaData singleMetaData = IndexMetaData.builder("index") + .settings(settings) + .putMapping("my_type", "{}") + .build(); + assertEquals("my_type", singleMetaData.resolveDocumentType("_doc")); + assertEquals("my_type", singleMetaData.resolveDocumentType("my_type")); + assertEquals("other_type", singleMetaData.resolveDocumentType("other_type")); + assertEquals("_default_", singleMetaData.resolveDocumentType("_default_")); + + IndexMetaData onlyDefaultMetaData = IndexMetaData.builder("index") + .settings(settings) + .putMapping("_default_", "{}") + .build(); + assertEquals("_doc", onlyDefaultMetaData.resolveDocumentType("_doc")); + assertEquals("my_type", onlyDefaultMetaData.resolveDocumentType("my_type")); + assertEquals("other_type", onlyDefaultMetaData.resolveDocumentType("other_type")); + assertEquals("_default_", onlyDefaultMetaData.resolveDocumentType("_default_")); + + IndexMetaData defaultAndTypeMetaData = IndexMetaData.builder("index") + .settings(settings) + .putMapping("_default_", "{}") + .putMapping("my_type", "{}") + .build(); + assertEquals("my_type", defaultAndTypeMetaData.resolveDocumentType("_doc")); + 
assertEquals("my_type", defaultAndTypeMetaData.resolveDocumentType("my_type")); + assertEquals("other_type", defaultAndTypeMetaData.resolveDocumentType("other_type")); + assertEquals("_default_", defaultAndTypeMetaData.resolveDocumentType("_default_")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index d43752417bf88..33a7e444a3820 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -372,4 +372,60 @@ public void testDefaultMappingIsDeprecated() throws IOException { "cannot have more than one type"); } + public void testResolveDocumentType() throws IOException { + MapperService mapperService = createIndex("test").mapperService(); + assertEquals("_doc", mapperService.resolveDocumentType("_doc")); + assertEquals("my_type", mapperService.resolveDocumentType("my_type")); + assertEquals("_default_", mapperService.resolveDocumentType("_default_")); + + String mapping = Strings.toString( + XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject()); + mapperService.merge("type1", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + + assertEquals("type1", mapperService.resolveDocumentType("_doc")); + assertEquals("type1", mapperService.resolveDocumentType("type1")); + assertEquals("my_type", mapperService.resolveDocumentType("my_type")); + + MapperService mapperService2 = createIndex("test2").mapperService(); + mapperService2.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + + "cannot have more than one type"); + + assertEquals("_doc", mapperService2.resolveDocumentType("_doc")); + assertEquals("my_type", mapperService2.resolveDocumentType("my_type")); + + mapperService2.merge("type1", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + assertEquals("type1", mapperService2.resolveDocumentType("_doc")); + assertEquals("type1", mapperService2.resolveDocumentType("type1")); + assertEquals("my_type", mapperService2.resolveDocumentType("my_type")); + } + + public void testResolveDocumentType5x() throws IOException { + MapperService mapperService = createIndex("test", Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_16).build()).mapperService(); + assertEquals("_doc", mapperService.resolveDocumentType("_doc")); + assertEquals("my_type", mapperService.resolveDocumentType("my_type")); + + String mapping = Strings.toString( + XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject()); + mapperService.merge("type1", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + + assertEquals("_doc", mapperService.resolveDocumentType("_doc")); + assertEquals("type1", mapperService.resolveDocumentType("type1")); + assertEquals("my_type", mapperService.resolveDocumentType("my_type")); + + mapperService.merge("type2", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + assertEquals("_doc", mapperService.resolveDocumentType("_doc")); + assertEquals("type1", mapperService.resolveDocumentType("type1")); + assertEquals("type2", mapperService.resolveDocumentType("type2")); + assertEquals("my_type", 
mapperService.resolveDocumentType("my_type")); + + MapperService mapperService2 = createIndex("test2").mapperService(); + mapperService2.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + + "cannot have more than one type"); + + assertEquals("_doc", mapperService2.resolveDocumentType("_doc")); + assertEquals("my_type", mapperService2.resolveDocumentType("my_type")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 159cffd52b5d9..45ef3f1f393f9 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -21,6 +21,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; +import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; @@ -65,6 +66,7 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.translog.TestTranslog; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.indices.IndicesService; @@ -796,4 +798,45 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti client().search(countRequest).actionGet().getHits().totalHits, equalTo(numDocs + moreDocs)); } + public void testShardChangesWithDefaultDocType() throws Exception { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.translog.flush_threshold_size", "512mb") // do not flush + .put("index.soft_deletes.enabled", true).build(); + IndexService indexService = createIndex("index", settings, "user_doc", "title", "type=keyword"); + int numOps = between(1, 10); + for (int i = 0; i < numOps; i++) { + if (randomBoolean()) { + client().prepareIndex("index", randomFrom("_doc", "user_doc"), randomFrom("1", "2")) + .setSource("{}", XContentType.JSON).setVersionType(VersionType.EXTERNAL).setVersion(i).get(); + } else { + client().prepareDelete("index", randomFrom("_doc", "user_doc"), randomFrom("1", "2")) + .setVersionType(VersionType.EXTERNAL).setVersion(i).get(); + } + } + IndexShard shard = indexService.getShard(0); + try (Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot("test", 0, numOps - 1, true); + Translog.Snapshot translogSnapshot = getTranslog(shard).newSnapshot()) { + List opsFromLucene = TestTranslog.drainSnapshot(luceneSnapshot, true); + List opsFromTranslog = TestTranslog.drainSnapshot(translogSnapshot, true); + assertThat(opsFromLucene, equalTo(opsFromTranslog)); + } + } + + public void testRoutingRequiresTypeless() throws IOException { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build(); + createIndex("index", settings, "some_type", "_routing", "required=true"); + + expectThrows(RoutingMissingException.class, + client().prepareIndex("index", "_doc", "1").setSource()::get); + + 
expectThrows(RoutingMissingException.class, + client().prepareDelete("index", "_doc", "1")::get); + + expectThrows(RoutingMissingException.class, + client().prepareUpdate("index", "_doc", "1").setDoc()::get); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 390b15cbb6611..70514e03a487b 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -83,6 +84,7 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.DeleteResult; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; @@ -1510,7 +1512,7 @@ public void testRefreshMetric() throws IOException { } long refreshCount = shard.refreshStats().getTotal(); indexDoc(shard, "_doc", "test"); - try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test", + try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "_doc", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1)); } @@ -2220,7 +2222,7 @@ public void testSearcherWrapperIsUsed() throws IOException { indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); - Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", + Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "_doc", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); @@ -2262,7 +2264,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); assertEquals(search.totalHits, 1); } - getResult = newShard.get(new Engine.Get(false, false, "test", "1", + getResult = newShard.get(new Engine.Get(false, false, "_doc", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader @@ -3664,4 +3666,60 @@ private void randomReplicaOperationPermitAcquisition(final IndexShard indexShard indexShard.acquireAllReplicaOperationsPermits(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, listener, timeout); } } + + public void testTypelessDelete() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("index") + .putMapping("some_type", "{ \"properties\": {}}") + .settings(settings) + .build(); + IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + 
recoverShardFromStore(shard); + Engine.IndexResult indexResult = indexDoc(shard, "some_type", "id", "{}"); + assertTrue(indexResult.isCreated()); + + DeleteResult deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "some_other_type", "id", VersionType.INTERNAL, + SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM); + assertFalse(deleteResult.isFound()); + + deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "_doc", "id", VersionType.INTERNAL, + SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM); + assertTrue(deleteResult.isFound()); + + closeShards(shard); + } + + public void testTypelessGet() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("index") + .putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(shard); + Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}"); + assertTrue(indexResult.isCreated()); + + org.elasticsearch.index.engine.Engine.GetResult getResult = shard.get( + new Engine.Get(true, true, "some_type", "0", new Term("_id", Uid.encodeId("0")))); + assertTrue(getResult.exists()); + getResult.close(); + + getResult = shard.get(new Engine.Get(true, true, "some_other_type", "0", new Term("_id", Uid.encodeId("0")))); + assertFalse(getResult.exists()); + getResult.close(); + + getResult = shard.get(new Engine.Get(true, true, "_doc", "0", new Term("_id", Uid.encodeId("0")))); + assertTrue(getResult.exists()); + getResult.close(); + + closeShards(shard); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index 0e114233856c0..003054fc71550 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -34,7 +34,9 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Collection; +import java.util.Comparator; import java.util.List; import java.util.Random; import java.util.Set; @@ -128,4 +130,16 @@ public static long minTranslogGenUsedInRecovery(Path translogPath) throws IOExce public static long getCurrentTerm(Translog translog) { return translog.getCurrent().getPrimaryTerm(); } + + public static List drainSnapshot(Translog.Snapshot snapshot, boolean sortBySeqNo) throws IOException { + final List ops = new ArrayList<>(snapshot.totalOperations()); + Translog.Operation op; + while ((op = snapshot.next()) != null) { + ops.add(op); + } + if (sortBySeqNo) { + ops.sort(Comparator.comparing(Translog.Operation::seqNo)); + } + return ops; + } } diff --git a/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 54cf43deb230b..2b3226e240e8c 100644 --- a/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -19,6 +19,8 @@ package org.elasticsearch.indexing; import 
org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; @@ -254,4 +256,39 @@ public void testDocumentWithBlankFieldName() { assertThat(e.getRootCause().getMessage(), containsString("field name cannot be an empty string")); } + + public void test_docAsAliasOfActualTypeName() { + ElasticsearchAssertions.assertAcked( + client().admin().indices().prepareCreate("index").addMapping("some_type", "foo", "type=keyword").get()); + + // Index + client().prepareIndex("index", "_doc", "1").setSource("foo", "bar").get(); + assertTrue(client().prepareGet("index", "some_type", "1").get().isExists()); + + // Get + assertTrue(client().prepareGet("index", "_doc", "1").get().isExists()); + + // Update + assertEquals(Result.UPDATED, client().prepareUpdate("index", "_doc", "1").setDoc("foo", "baz").get().getResult()); + assertEquals(Result.CREATED, client().prepareUpdate("index", "_doc", "2").setDocAsUpsert(true).setDoc("foo", "quux") + .get().getResult()); + assertEquals("baz", client().prepareGet("index", "some_type", "1").get().getSource().get("foo")); + assertEquals("quux", client().prepareGet("index", "some_type", "2").get().getSource().get("foo")); + + // Delete + assertEquals(Result.DELETED, client().prepareDelete("index", "_doc", "1").get().getResult()); + assertFalse(client().prepareGet("index", "some_type", "1").get().isExists()); + + // Bulk + BulkResponse response = client().prepareBulk("index", "_doc") + .add(client().prepareIndex("index", "_doc", "1").setSource("foo", "bar")) + .add(client().prepareDelete("index", "_doc", "2")) + .get(); + assertFalse(response.hasFailures()); + BulkItemResponse[] items = response.getItems(); + assertEquals(Result.CREATED, items[0].getResponse().getResult()); + assertEquals(Result.DELETED, items[1].getResponse().getResult()); + assertTrue(client().prepareGet("index", "some_type", "1").get().isExists()); + assertFalse(client().prepareGet("index", "some_type", "2").get().isExists()); + } }
From 3ff0f240d76a5e0860a814820bdba80206b90d5a Mon Sep 17 00:00:00 2001 From: James Baiera Date: Fri, 1 Mar 2019 13:28:29 -0500 Subject: [PATCH 13/39] Remove missing variable from error message (#39321) When test clusters are stood up, one of the steps in the wait task is to wait for ports files to appear. An exception was added so that a timeout no longer fails without any information, but the exception text referenced a missing variable, which further obfuscated the problem.
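For context, a minimal sketch of the hazard in plain Java (names invented; the actual bug was a Groovy GString interpolating the missing `waitSeconds` property): error-reporting code that can itself fail hides the very condition it was meant to report.

public final class MaskedTimeoutSketch {
    public static void main(String[] args) {
        Object waitSeconds = null; // stands in for the missing build-script property
        try {
            // Building the message fails before the timeout is ever surfaced.
            throw new IllegalStateException(
                "timed out waiting for it to be created after " + waitSeconds.toString() + " seconds");
        } catch (NullPointerException e) {
            // Callers see a message-construction failure, not the timeout itself.
            System.out.println("original failure obscured by: " + e);
        }
    }
}

The fix below simply hard-codes the wait as 40 seconds so that the message can always be built.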
Backports #39321 --- .../org/elasticsearch/gradle/test/ClusterConfiguration.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 0e039a388a08d..b4d7f83fbfca7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -110,7 +110,7 @@ class ClusterConfiguration { } if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) { throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " + - "timed out waiting for it to be created after ${waitSeconds} seconds") + "timed out waiting for it to be created after 40 seconds") } return seedNode.transportUri() }
From 1e337c3b2160ea981c61e029a7d96bc7fb0afdd9 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 1 Mar 2019 10:29:59 -0800 Subject: [PATCH 14/39] [DOCS] Adds link to list of built-in users (#39529) --- docs/reference/commands/setup-passwords.asciidoc | 4 ++-- x-pack/docs/en/security/configuring-es.asciidoc | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index 6e6d3dd75ed21..3dcc9001534fa 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -3,8 +3,8 @@ [[setup-passwords]] == elasticsearch-setup-passwords -The `elasticsearch-setup-passwords` command sets the passwords for the built-in -`elastic`, `kibana`, `logstash_system`, `beats_system`, and `apm_system` users. +The `elasticsearch-setup-passwords` command sets the passwords for the +{stack-ov}/built-in-users.html[built-in users]. [float] === Synopsis diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 89dcdc2ad35ec..6e8217e3ba750 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -55,8 +55,7 @@ help you get up and running. The +elasticsearch-setup-passwords+ command is the simplest method to set the built-in users' passwords for the first time. For example, you can run the command in an "interactive" mode, which prompts you -to enter new passwords for the `elastic`, `kibana`, `beats_system`, -`logstash_system`, and `apm_system` users: +to enter new passwords for the built-in users: [source,shell] --------------------------------------------------
From 79ddc0c965191e443fa87a2196364607e3a169f4 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Sat, 2 Mar 2019 10:29:48 +0200 Subject: [PATCH 15/39] SQL: Fix merging of incompatible multi-fields (#39560) Fix bug in IndexResolver that caused conflicts in multi-field types to be silently ignored (causing the query to fail later on due to mapping conflicts).
The issue was caused by the multi-field, which forced the parent creation before checking its validity across mappings.
Fix #39547 (cherry picked from commit 4e4fe289f90b9b5eae09072d54903701a3128696) --- .../sql/analysis/index/IndexResolver.java | 4 +- .../analysis/index/IndexResolverTests.java | 42 +++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 43d356720f8ed..ec2dfa46f47f2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -324,7 +324,9 @@ static IndexResolution mergedMapping(String indexPattern, Map { return invalidF != null ? invalidF : createField(s, fieldCapab.getType(), emptyMap(), fieldCapab.isAggregatable()); }); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java index 6123bdf5d8fbb..0f4f8f030506c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java @@ -133,6 +133,48 @@ public void testMetaFieldsAreIgnored() throws Exception { assertEquals(DataType.KEYWORD, esIndex.mapping().get("text").getDataType()); } + public void testMergeIncompatibleCapabilitiesOfObjectFields() throws Exception { + Map> fieldCaps = new HashMap<>(); + + int depth = randomInt(5); + + List level = new ArrayList<>(); + String fieldName = randomAlphaOfLength(3); + level.add(fieldName); + for (int i = 0; i <= depth; i++) { + String l = randomAlphaOfLength(3); + level.add(l); + fieldName += "." + l; + } + + // define a sub-field + addFieldCaps(fieldCaps, fieldName + ".keyword", "keyword", true, true); + + Map multi = new HashMap<>(); + multi.put("long", new FieldCapabilities(fieldName, "long", true, true, new String[] { "one-index" }, null, null)); + multi.put("text", new FieldCapabilities(fieldName, "text", true, false, new String[] { "another-index" }, null, null)); + fieldCaps.put(fieldName, multi); + + + String wildcard = "*"; + IndexResolution resolution = IndexResolver.mergedMapping(wildcard, fieldCaps); + + assertTrue(resolution.isValid()); + + EsIndex esIndex = resolution.get(); + assertEquals(wildcard, esIndex.name()); + EsField esField = null; + Map props = esIndex.mapping(); + for (String lvl : level) { + esField = props.get(lvl); + props = esField.getProperties(); + } + assertEquals(InvalidMappedField.class, esField.getClass()); + assertEquals("mapped as [2] incompatible types: [text] in [another-index], [long] in [one-index]", + ((InvalidMappedField) esField).errorMessage()); + } + + public static IndexResolution merge(EsIndex... indices) { return IndexResolver.mergedMapping("*", fromMappings(indices)); }
From 0cab261832d0bc9b27ccc57583629f5347ea94c5 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 1 Mar 2019 17:17:26 +0200 Subject: [PATCH 16/39] [ML] Shave off DeleteExpiredDataIT runtime (#39557) This commit parallelizes some parts of the test and removes an unnecessary refresh call. On my local machine it shaves off about 15 seconds, for a test execution time of ~64s (down from ~80s).
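In essence the change restructures the test from start-one-then-wait to start-all-then-wait-all, so the per-job waits overlap instead of adding up. A hedged, self-contained Java sketch of that pattern (not the test code itself):

import java.util.List;
import java.util.concurrent.CompletableFuture;

public final class OverlappedWaitsSketch {
    public static void main(String[] args) {
        // Start everything first (analogous to opening all jobs and datafeeds)...
        List<CompletableFuture<Void>> jobs = List.of(
            CompletableFuture.runAsync(OverlappedWaitsSketch::simulatedJob),
            CompletableFuture.runAsync(OverlappedWaitsSketch::simulatedJob),
            CompletableFuture.runAsync(OverlappedWaitsSketch::simulatedJob));
        // ...then wait; total latency is roughly the slowest job, not the sum.
        jobs.forEach(CompletableFuture::join);
    }

    private static void simulatedJob() {
        try {
            Thread.sleep(1000L); // stands in for an ML job running to completion
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}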
This test is still slow but progress over perfection. Relates #37339 --- .../ml/integration/DeleteExpiredDataIT.java | 37 ++++++++++++------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index f51f694edd483..4febf4324cd42 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; @@ -77,9 +78,6 @@ public void setUpData() throws IOException { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertThat(bulkResponse.hasFailures(), is(false)); - - // Ensure all data is searchable - client().admin().indices().prepareRefresh(DATA_INDEX).get(); } @After @@ -94,6 +92,17 @@ public void testDeleteExpiredDataGivenNothingToDelete() throws Exception { } public void testDeleteExpiredData() throws Exception { + // Index some unused state documents (more than 10K to test scrolling works) + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10010; i++) { + String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), "doc", docId); + indexRequest.source(Collections.emptyMap()); + bulkRequestBuilder.add(indexRequest); + } + ActionFuture indexUnusedStateDocsResponse = bulkRequestBuilder.execute(); + registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(1000L)); registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(1000L)); registerJob(newJobBuilder("snapshots-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L)); @@ -104,6 +113,8 @@ public void testDeleteExpiredData() throws Exception { long now = System.currentTimeMillis(); long oneDayAgo = now - TimeValue.timeValueHours(48).getMillis() - 1; + + // Start all jobs for (Job.Builder job : getJobs()) { putJob(job); @@ -118,7 +129,14 @@ public void testDeleteExpiredData() throws Exception { // Run up to a day ago openJob(job.getId()); startDatafeed(datafeedId, 0, now - TimeValue.timeValueHours(24).getMillis()); + } + + // Now let's wait for all jobs to be closed + for (Job.Builder job : getJobs()) { waitUntilJobIsClosed(job.getId()); + } + + for (Job.Builder job : getJobs()) { assertThat(getBuckets(job.getId()).size(), is(greaterThanOrEqualTo(47))); assertThat(getRecords(job.getId()).size(), equalTo(1)); List modelSnapshots = getModelSnapshots(job.getId()); @@ -144,6 +162,7 @@ public void testDeleteExpiredData() throws Exception { waitForecastToFinish(job.getId(), forecastDefaultExpiryId); waitForecastToFinish(job.getId(), forecastNoExpiryId); } + // Refresh to ensure the snapshot timestamp updates are visible 
client().admin().indices().prepareRefresh("*").get(); @@ -176,16 +195,8 @@ public void testDeleteExpiredData() throws Exception { assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount())); } - // Index some unused state documents (more than 10K to test scrolling works) - BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for (int i = 0; i < 10010; i++) { - String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i); - IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), "doc", docId); - indexRequest.source(Collections.emptyMap()); - bulkRequestBuilder.add(indexRequest); - } - assertThat(bulkRequestBuilder.get().status(), equalTo(RestStatus.OK)); + // Before we call the delete-expired-data action we need to make sure the unused state docs were indexed + assertThat(indexUnusedStateDocsResponse.get().status(), equalTo(RestStatus.OK)); // Now call the action under test client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); From f5508f87c9d1488ee441e19d1e2e15714fd454de Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Mon, 4 Mar 2019 13:01:41 +1100 Subject: [PATCH 17/39] Mute failing test on FIPS JVM Relates: #39580 Backport of: #39616 --- .../xpack/core/ssl/SSLConfigurationReloaderTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 3980fb22c3f07..86029cdc75dd9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -448,6 +448,7 @@ void reloadSSLContext(SSLConfiguration configuration) { * truncating the certificate file that is being monitored */ public void testPEMTrustReloadException() throws Exception { + assumeFalse("Broken on BC-FIPS -- https://github.com/elastic/elasticsearch/issues/39580", inFipsJvm()); Path tempDir = createTempDir(); Path clientCertPath = tempDir.resolve("testclient.crt"); Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), clientCertPath); From 6d2c886277e33c481e89cabfccb3f172f46a77af Mon Sep 17 00:00:00 2001 From: David Turner Date: Sat, 2 Mar 2019 13:56:33 +0000 Subject: [PATCH 18/39] Mute testDoNotWaitForPendingSeqNo Relates #39510, #39595. 
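For reference, both this mute and the previous one use the test framework's standard idioms. A hedged sketch (test names invented; assumes the Elasticsearch test framework is on the classpath):

import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.test.ESTestCase;

public class MutingIdiomsSketchTests extends ESTestCase {

    // Skips the test everywhere until the linked issue is resolved.
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39510")
    public void testMutedEverywhere() {
        fail("never executed while the annotation is present");
    }

    // Skips the test only when the stated condition holds, here on FIPS JVMs.
    public void testMutedOnFipsOnly() {
        assumeFalse("Broken on BC-FIPS", inFipsJvm());
    }
}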
--- .../index/replication/RecoveryDuringReplicationTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 474341429a84f..2af4249c907a0 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -439,6 +439,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39510") public void testDoNotWaitForPendingSeqNo() throws Exception { IndexMetaData metaData = buildIndexMetaData(1);
From d4b4225f63c8dc08ae2364e6841a8226c1e03bc2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 4 Mar 2019 10:45:45 +0100 Subject: [PATCH 19/39] Snapshot Stability Fixes (#39550) * Backport of various snapshot stability fixes from `master` to `6.7`, making the snapshot logic in `6.7` functionally equivalent to that in `master` * Includes #38368, #38025 and #37612 --- .../cluster/SnapshotsInProgress.java | 36 +- .../snapshots/SnapshotException.java | 4 - .../snapshots/SnapshotShardsService.java | 378 +++++++------- .../snapshots/SnapshotsService.java | 467 +++++++++--------- 4 files changed, 444 insertions(+), 441 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 7308d471afb9d..73be2ea006656 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState.Custom; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -93,9 +94,11 @@ public static class Entry { private final ImmutableOpenMap> waitingIndices; private final long startTime; private final long repositoryStateId; + @Nullable private final String failure; public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, - long startTime, long repositoryStateId, ImmutableOpenMap shards) { + long startTime, long repositoryStateId, ImmutableOpenMap shards, + String failure) { this.state = state; this.snapshot = snapshot; this.includeGlobalState = includeGlobalState; @@ -110,15 +113,26 @@ public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, Sta this.waitingIndices = findWaitingIndices(shards); } this.repositoryStateId = repositoryStateId; + this.failure = failure; + } + + public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, + long startTime, long repositoryStateId, ImmutableOpenMap shards) { + this(snapshot, includeGlobalState, partial, state, indices, startTime, repositoryStateId, shards, null); + } + + public Entry(Entry entry, State state, ImmutableOpenMap shards) { + this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, + entry.repositoryStateId, shards, entry.failure); + } + + public Entry(Entry entry,
State state, ImmutableOpenMap shards, String failure) { + this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, + entry.repositoryStateId, shards, failure); } public Entry(Entry entry, ImmutableOpenMap shards) { - this(entry, entry.state, shards); + this(entry, entry.state, shards, entry.failure); } public Snapshot snapshot() { @@ -157,6 +171,10 @@ public long getRepositoryStateId() { return repositoryStateId; } + public String failure() { + return failure; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -437,6 +455,12 @@ public SnapshotsInProgress(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { repositoryStateId = in.readLong(); } + final String failure; + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { + failure = in.readOptionalString(); + } else { + failure = null; + } entries[i] = new Entry(snapshot, includeGlobalState, partial, @@ -444,7 +468,8 @@ public SnapshotsInProgress(StreamInput in) throws IOException { Collections.unmodifiableList(indexBuilder), startTime, repositoryStateId, - builder.build()); + builder.build(), + failure); } this.entries = Arrays.asList(entries); } @@ -476,6 +501,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { out.writeLong(entry.repositoryStateId); } + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { + out.writeOptionalString(entry.failure); + } } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java index d389ed634f3af..05db85d6f7211 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotException.java @@ -51,10 +51,6 @@ public SnapshotException(final Snapshot snapshot, final String msg, final Throwa } } - public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg) { - this(repositoryName, snapshotId, msg, null); - } - public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg, final Throwable cause) { super("[" + repositoryName + ":" + snapshotId + "] " + msg, cause); this.repositoryName = repositoryName; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 3f1cf1db32807..116a3f45b0087 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -69,26 +69,26 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestDeduplicator; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Executor; -import 
java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; -import static java.util.Collections.unmodifiableMap; +import static java.util.Collections.unmodifiableList; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE_SAME; @@ -114,11 +114,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final ThreadPool threadPool; - private final Lock shutdownLock = new ReentrantLock(); + private final Map> shardSnapshots = new HashMap<>(); - private final Condition shutdownCondition = shutdownLock.newCondition(); - - private volatile Map> shardSnapshots = emptyMap(); + // A map of snapshots to the shardIds that we already reported to the master as failed + private final TransportRequestDeduplicator remoteFailedRequestDeduplicator = + new TransportRequestDeduplicator<>(); private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; @@ -139,7 +139,7 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S } // The constructor of UpdateSnapshotStatusAction will register itself to the TransportService. - this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction(settings, UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction( transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver); if (DiscoveryNode.isMasterNode(settings)) { @@ -147,7 +147,6 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S transportService.registerRequestHandler(UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, UpdateSnapshotStatusRequestV6::new, ThreadPool.Names.SAME, new UpdateSnapshotStateRequestHandlerV6()); } - } @Override @@ -161,16 +160,6 @@ protected void doStart() { @Override protected void doStop() { - shutdownLock.lock(); - try { - while(!shardSnapshots.isEmpty() && shutdownCondition.await(5, TimeUnit.SECONDS)) { - // Wait for at most 5 second for locally running snapshots to finish - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } finally { - shutdownLock.unlock(); - } } @Override @@ -185,7 +174,9 @@ public void clusterChanged(ClusterChangedEvent event) { SnapshotsInProgress currentSnapshots = event.state().custom(SnapshotsInProgress.TYPE); if ((previousSnapshots == null && currentSnapshots != null) || (previousSnapshots != null && previousSnapshots.equals(currentSnapshots) == false)) { - processIndexShardSnapshots(event); + synchronized (shardSnapshots) { + processIndexShardSnapshots(currentSnapshots, event.state().nodes().getMasterNode()); + } } String previousMasterNodeId = event.previousState().nodes().getMasterNodeId(); @@ -202,13 +193,14 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { // abort any snapshots occurring on the soon-to-be closed shard - Map> snapshotShardsMap = shardSnapshots; - for (Map.Entry> snapshotShards : snapshotShardsMap.entrySet()) { - Map shards = snapshotShards.getValue(); - if (shards.containsKey(shardId)) { - logger.debug("[{}] 
shard closing, abort snapshotting for snapshot [{}]", - shardId, snapshotShards.getKey().getSnapshotId()); - shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); + synchronized (shardSnapshots) { + for (Map.Entry> snapshotShards : shardSnapshots.entrySet()) { + Map shards = snapshotShards.getValue(); + if (shards.containsKey(shardId)) { + logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", + shardId, snapshotShards.getKey().getSnapshotId()); + shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); + } } } } @@ -223,163 +215,146 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @return map of shard id to snapshot status */ public Map currentSnapshotShards(Snapshot snapshot) { - return shardSnapshots.get(snapshot); + synchronized (shardSnapshots) { + final Map current = shardSnapshots.get(snapshot); + return current == null ? null : new HashMap<>(current); + } } /** * Checks if any new shards should be snapshotted on this node * - * @param event cluster state changed event + * @param snapshotsInProgress Current snapshots in progress in cluster state */ - private void processIndexShardSnapshots(ClusterChangedEvent event) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - Map> survivors = new HashMap<>(); + private void processIndexShardSnapshots(SnapshotsInProgress snapshotsInProgress, DiscoveryNode masterNode) { + cancelRemoved(snapshotsInProgress); + if (snapshotsInProgress != null) { + startNewSnapshots(snapshotsInProgress, masterNode); + } + } + + private void cancelRemoved(@Nullable SnapshotsInProgress snapshotsInProgress) { // First, remove snapshots that are no longer there - for (Map.Entry> entry : shardSnapshots.entrySet()) { + Iterator>> it = shardSnapshots.entrySet().iterator(); + while (it.hasNext()) { + final Map.Entry> entry = it.next(); final Snapshot snapshot = entry.getKey(); - if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) { - survivors.put(entry.getKey(), entry.getValue()); - } else { + if (snapshotsInProgress == null || snapshotsInProgress.snapshot(snapshot) == null) { // abort any running snapshots of shards for the removed entry; // this could happen if for some reason the cluster state update for aborting // running shards is missed, then the snapshot is removed is a subsequent cluster // state update, which is being processed here + it.remove(); for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().values()) { snapshotStatus.abortIfNotCompleted("snapshot has been removed in cluster state, aborting"); } } } + } + private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress, DiscoveryNode masterNode) { // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running // snapshots in the future - Map> newSnapshots = new HashMap<>(); // Now go through all snapshots and update existing or create missing - final String localNodeId = event.state().nodes().getLocalNodeId(); - final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); - final Map> snapshotIndices = new HashMap<>(); - if (snapshotsInProgress != null) { - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - snapshotIndices.put(entry.snapshot(), - entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity()))); - if (entry.state() == State.STARTED) { - Map startedShards = new HashMap<>(); - Map snapshotShards = 
shardSnapshots.get(entry.snapshot()); - for (ObjectObjectCursor shard : entry.shards()) { - // Add all new shards to start processing on - if (localNodeId.equals(shard.value.nodeId())) { - if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.containsKey(shard.key))) { - logger.trace("[{}] - Adding shard to the queue", shard.key); - startedShards.put(shard.key, IndexShardSnapshotStatus.newInitializing()); - } + final String localNodeId = clusterService.localNode().getId(); + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + final State entryState = entry.state(); + if (entryState == State.STARTED) { + Map startedShards = null; + final Snapshot snapshot = entry.snapshot(); + Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); + for (ObjectObjectCursor shard : entry.shards()) { + // Add all new shards to start processing on + final ShardId shardId = shard.key; + final ShardSnapshotStatus shardSnapshotStatus = shard.value; + if (localNodeId.equals(shardSnapshotStatus.nodeId()) && shardSnapshotStatus.state() == State.INIT + && snapshotShards.containsKey(shardId) == false) { + logger.trace("[{}] - Adding shard to the queue", shardId); + if (startedShards == null) { + startedShards = new HashMap<>(); } + startedShards.put(shardId, IndexShardSnapshotStatus.newInitializing()); } - if (!startedShards.isEmpty()) { - newSnapshots.put(entry.snapshot(), startedShards); - if (snapshotShards != null) { - // We already saw this snapshot but we need to add more started shards - Map shards = new HashMap<>(); - // Put all shards that were already running on this node - shards.putAll(snapshotShards); - // Put all newly started shards - shards.putAll(startedShards); - survivors.put(entry.snapshot(), unmodifiableMap(shards)); - } else { - // Brand new snapshot that we haven't seen before - survivors.put(entry.snapshot(), unmodifiableMap(startedShards)); + } + if (startedShards != null && startedShards.isEmpty() == false) { + shardSnapshots.computeIfAbsent(snapshot, s -> new HashMap<>()).putAll(startedShards); + startNewShards(entry, startedShards, masterNode); + } + } else if (entryState == State.ABORTED) { + // Abort all running shards for this snapshot + final Snapshot snapshot = entry.snapshot(); + Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); + for (ObjectObjectCursor shard : entry.shards()) { + final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); + if (snapshotStatus != null) { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = + snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); + final Stage stage = lastSnapshotStatus.getStage(); + if (stage == Stage.FINALIZE) { + logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + + "letting it finish", snapshot, shard.key); + } else if (stage == Stage.DONE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + + "updating status on the master", snapshot, shard.key); + notifySuccessfulSnapshotShard(snapshot, shard.key, masterNode); + } else if (stage == Stage.FAILURE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + + "updating status on the master", snapshot, shard.key); + notifyFailedSnapshotShard(snapshot, shard.key, lastSnapshotStatus.getFailure(), masterNode); } - } - } else if (entry.state() == State.ABORTED) { - // Abort all running shards for this snapshot - Map snapshotShards = 
shardSnapshots.get(entry.snapshot()); - if (snapshotShards != null) { - final String failure = "snapshot has been aborted"; - for (ObjectObjectCursor shard : entry.shards()) { - final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); - if (snapshotStatus != null) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure); - final Stage stage = lastSnapshotStatus.getStage(); - if (stage == Stage.FINALIZE) { - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", entry.snapshot(), shard.key); - - } else if (stage == Stage.DONE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", entry.snapshot(), shard.key); - notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId, masterNode); - - } else if (stage == Stage.FAILURE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + - "updating status on the master", entry.snapshot(), shard.key); - final String snapshotFailure = lastSnapshotStatus.getFailure(); - notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, snapshotFailure, masterNode); - } - } + } else { + // due to CS batching we might have missed the INIT state and straight went into ABORTED + // notify master that abort has completed by moving to FAILED + if (shard.value.state() == State.ABORTED) { + notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason(), masterNode); } } } } } + } - // Update the list of snapshots that we saw and tried to started - // If startup of these shards fails later, we don't want to try starting these shards again - shutdownLock.lock(); - try { - shardSnapshots = unmodifiableMap(survivors); - if (shardSnapshots.isEmpty()) { - // Notify all waiting threads that no more snapshots - shutdownCondition.signalAll(); - } - } finally { - shutdownLock.unlock(); - } - - // We have new shards to starts - if (newSnapshots.isEmpty() == false) { - Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - for (final Map.Entry> entry : newSnapshots.entrySet()) { - final Snapshot snapshot = entry.getKey(); - final Map indicesMap = snapshotIndices.get(snapshot); - assert indicesMap != null; - - for (final Map.Entry shardEntry : entry.getValue().entrySet()) { - final ShardId shardId = shardEntry.getKey(); - final IndexId indexId = indicesMap.get(shardId.getIndexName()); - executor.execute(new AbstractRunnable() { + private void startNewShards(SnapshotsInProgress.Entry entry, Map startedShards, + DiscoveryNode masterNode) { + final Snapshot snapshot = entry.snapshot(); + final Map indicesMap = entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + for (final Map.Entry shardEntry : startedShards.entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final IndexId indexId = indicesMap.get(shardId.getIndexName()); + assert indexId != null; + executor.execute(new AbstractRunnable() { - final SetOnce failure = new SetOnce<>(); + private final SetOnce failure = new SetOnce<>(); - @Override - public void doRun() { - final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); - assert indexId != null; - snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); - } + @Override + public void doRun() { + final IndexShard indexShard 
= + indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); + } - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", - shardId, snapshot), e); - failure.set(e); - } + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + failure.set(e); + } - @Override - public void onRejection(Exception e) { - failure.set(e); - } + @Override + public void onRejection(Exception e) { + failure.set(e); + } - @Override - public void onAfter() { - final Exception exception = failure.get(); - if (exception != null) { - final String failure = ExceptionsHelper.detailedMessage(exception); - notifyFailedSnapshotShard(snapshot, shardId, localNodeId, failure, masterNode); - } else { - notifySuccessfulSnapshotShard(snapshot, shardId, localNodeId, masterNode); - } - } - }); + @Override + public void onAfter() { + final Exception exception = failure.get(); + if (exception != null) { + notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(exception), masterNode); + } else { + notifySuccessfulSnapshotShard(snapshot, shardId, masterNode); + } } - } + }); } } @@ -432,8 +407,6 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { if (snapshotsInProgress == null) { return; } - - final String localNodeId = event.state().nodes().getLocalNodeId(); final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { @@ -442,7 +415,6 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { ImmutableOpenMap masterShards = snapshot.shards(); for(Map.Entry localShard : localShards.entrySet()) { ShardId shardId = localShard.getKey(); - IndexShardSnapshotStatus localShardStatus = localShard.getValue(); ShardSnapshotStatus masterShard = masterShards.get(shardId); if (masterShard != null && masterShard.state().completed() == false) { final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); @@ -452,14 +424,13 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId); - notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localNodeId, masterNode); + notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, masterNode); } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId); - final String failure = indexShardSnapshotStatus.getFailure(); - notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, failure, masterNode); + notifyFailedSnapshotShard(snapshot.snapshot(), shardId, indexShardSnapshotStatus.getFailure(), masterNode); } } } @@ -528,34 +499,64 @@ public String toString() { } /** Notify the master node that the given shard has been successfully snapshotted **/ - void notifySuccessfulSnapshotShard(final 
Snapshot snapshot, - final ShardId shardId, - final String localNodeId, - final DiscoveryNode masterNode) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode); + private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, DiscoveryNode masterNode) { + sendSnapshotShardUpdate( + snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS), masterNode); } /** Notify the master node that the given shard failed to be snapshotted **/ - void notifyFailedSnapshotShard(final Snapshot snapshot, - final ShardId shardId, - final String localNodeId, - final String failure, - final DiscoveryNode masterNode) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(localNodeId, State.FAILED, failure), masterNode); + private void notifyFailedSnapshotShard(Snapshot snapshot, ShardId shardId, String failure, DiscoveryNode masterNode) { + sendSnapshotShardUpdate( + snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure), masterNode); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ - void sendSnapshotShardUpdate(final Snapshot snapshot, - final ShardId shardId, - final ShardSnapshotStatus status, - final DiscoveryNode masterNode) { + void sendSnapshotShardUpdate(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status, DiscoveryNode masterNode) { try { if (masterNode.getVersion().onOrAfter(Version.V_6_1_0)) { UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME); } else { - UpdateSnapshotStatusRequestV6 requestV6 = new UpdateSnapshotStatusRequestV6(snapshot, shardId, status); - transportService.sendRequest(masterNode, UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, requestV6, INSTANCE_SAME); + remoteFailedRequestDeduplicator.executeOnce( + new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status), + new ActionListener() { + @Override + public void onResponse(Void aVoid) { + logger.trace("[{}] [{}] updated snapshot state", snapshot, status); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); + } + }, + (req, reqListener) -> transportService.sendRequest( + transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, req, + new TransportResponseHandler() { + @Override + public UpdateIndexShardSnapshotStatusResponse read(StreamInput in) throws IOException { + final UpdateIndexShardSnapshotStatusResponse response = new UpdateIndexShardSnapshotStatusResponse(); + response.readFrom(in); + return response; + } + + @Override + public void handleResponse(UpdateIndexShardSnapshotStatusResponse response) { + reqListener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + reqListener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }) + ); } } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); @@ -588,11 +589,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } - class SnapshotStateExecutor implements ClusterStateTaskExecutor { + private 
class SnapshotStateExecutor implements ClusterStateTaskExecutor { @Override public ClusterTasksResult - execute(ClusterState currentState, List tasks) throws Exception { + execute(ClusterState currentState, List tasks) { final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots != null) { int changedCount = 0; @@ -622,8 +623,6 @@ class SnapshotStateExecutor implements ClusterStateTaskExecutor 0) { logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - - final SnapshotsInProgress updatedSnapshots = - new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterTasksResult.builder().successes(tasks).build( - ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build()); + return ClusterTasksResult.builder().successes(tasks) + .build(ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, + new SnapshotsInProgress(unmodifiableList(entries))).build()); } } return ClusterTasksResult.builder().successes(tasks).build(currentState); @@ -646,13 +643,14 @@ static class UpdateIndexShardSnapshotStatusResponse extends ActionResponse { } - class UpdateSnapshotStatusAction extends - TransportMasterNodeAction { - UpdateSnapshotStatusAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, actionName, transportService, clusterService, threadPool, - actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new); + private class UpdateSnapshotStatusAction + extends TransportMasterNodeAction { + UpdateSnapshotStatusAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super( + settings, SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new + ); } @Override @@ -667,7 +665,7 @@ protected UpdateIndexShardSnapshotStatusResponse newResponse() { @Override protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { innerUpdateSnapshotState(request, listener); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index c7bf91b476c5b..998ab2a38639b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -83,7 +83,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import static java.util.Collections.unmodifiableList; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; @@ -98,9 +100,9 @@ * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, ActionListener)} method kicks in and initializes * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state
 * <li>Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
- * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method</li>
+ * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots} method</li>
 * <li>Once shard snapshot is created data node updates state of the shard in the cluster state using
- * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus, DiscoveryNode)} method</li>
+ * the {@link SnapshotShardsService#sendSnapshotShardUpdate} method</li>
 * <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
 * as completed</li>
  • After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository, @@ -121,6 +123,12 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final Map>> snapshotCompletionListeners = new ConcurrentHashMap<>(); + // Set of snapshots that are currently being initialized by this node + private final Set initializingSnapshots = Collections.synchronizedSet(new HashSet<>()); + + // Set of snapshots that are currently being ended by this node + private final Set endingSnapshots = Collections.synchronizedSet(new HashSet<>()); + @Inject public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { @@ -207,7 +215,7 @@ public List snapshots(final String repositoryName, } final ArrayList snapshotList = new ArrayList<>(snapshotSet); CollectionUtil.timSort(snapshotList); - return Collections.unmodifiableList(snapshotList); + return unmodifiableList(snapshotList); } /** @@ -223,7 +231,7 @@ public List currentSnapshots(final String repositoryName) { snapshotList.add(inProgressSnapshot(entry)); } CollectionUtil.timSort(snapshotList); - return Collections.unmodifiableList(snapshotList); + return unmodifiableList(snapshotList); } /** @@ -269,7 +277,7 @@ public ClusterState execute(ClusterState currentState) { if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot here to be processed in clusterStateProcessed List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, - request.indicesOptions(), request.indices())); + request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); List snapshotIndices = repositoryData.resolveNewIndices(indices); newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId), @@ -280,6 +288,7 @@ public ClusterState execute(ClusterState currentState) { System.currentTimeMillis(), repositoryData.getGenId(), null); + initializingSnapshots.add(newSnapshot.snapshot()); snapshots = new SnapshotsInProgress(newSnapshot); } else { throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); @@ -290,6 +299,9 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); + if (newSnapshot != null) { + initializingSnapshots.remove(newSnapshot.snapshot()); + } newSnapshot = null; listener.onFailure(e); } @@ -297,7 +309,21 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { if (newSnapshot != null) { - beginSnapshot(newState, newSnapshot, request.partial(), listener); + final Snapshot current = newSnapshot.snapshot(); + assert initializingSnapshots.contains(current); + beginSnapshot(newState, newSnapshot, request.partial(), new ActionListener() { + @Override + public void onResponse(final Snapshot snapshot) { + initializingSnapshots.remove(snapshot); + listener.onResponse(snapshot); + } + + @Override + public void onFailure(final Exception e) { + initializingSnapshots.remove(current); + listener.onFailure(e); + } + }); } } @@ -305,7 +331,6 @@ public void 
clusterStateProcessed(String source, ClusterState oldState, final Cl public TimeValue timeout() { return request.masterNodeTimeout(); } - }); } @@ -368,8 +393,11 @@ private void beginSnapshot(final ClusterState clusterState, boolean snapshotCreated; + boolean hadAbortedInitializations; + @Override protected void doRun() { + assert initializingSnapshots.contains(snapshot.snapshot()); Repository repository = repositoriesService.repository(snapshot.snapshot().getRepository()); MetaData metaData = clusterState.metaData(); @@ -394,9 +422,6 @@ protected void doRun() { } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - SnapshotsInProgress.Entry endSnapshot; - String failure; - @Override public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); @@ -407,9 +432,13 @@ public ClusterState execute(ClusterState currentState) { continue; } - if (entry.state() != State.ABORTED) { - // Replace the snapshot that was just intialized - ImmutableOpenMap shards = + if (entry.state() == State.ABORTED) { + entries.add(entry); + assert entry.shards().isEmpty(); + hadAbortedInitializations = true; + } else { + // Replace the snapshot that was just initialized + ImmutableOpenMap shards = shards(currentState, entry.indices()); if (!partial) { Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, @@ -417,9 +446,6 @@ public ClusterState execute(ClusterState currentState) { Set missing = indicesWithMissingShards.v1(); Set closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { - endSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); - entries.add(endSnapshot); - final StringBuilder failureMessage = new StringBuilder(); if (missing.isEmpty() == false) { failureMessage.append("Indices don't have primary shards "); @@ -432,24 +458,15 @@ public ClusterState execute(ClusterState currentState) { failureMessage.append("Indices are closed "); failureMessage.append(closed); } - failure = failureMessage.toString(); + entries.add(new SnapshotsInProgress.Entry(entry, State.FAILED, shards, failureMessage.toString())); continue; } } - SnapshotsInProgress.Entry updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); - entries.add(updatedSnapshot); - if (completed(shards.values())) { - endSnapshot = updatedSnapshot; - } - } else { - assert entry.state() == State.ABORTED : "expecting snapshot to be aborted during initialization"; - failure = "snapshot was aborted during initialization"; - endSnapshot = entry; - entries.add(endSnapshot); + entries.add(new SnapshotsInProgress.Entry(entry, State.STARTED, shards)); } } return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))) .build(); } @@ -478,12 +495,12 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // should still exist when listener is registered. userCreateSnapshotListener.onResponse(snapshot.snapshot()); - // Now that snapshot completion listener is registered we can end the snapshot if needed - // We should end snapshot only if 1) we didn't accept it for processing (which happens when there - // is nothing to do) and 2) there was a snapshot in metadata that we should end. 
Otherwise we should - // go ahead and continue working on this snapshot rather then end here. - if (endSnapshot != null) { - endSnapshot(endSnapshot, failure); + if (hadAbortedInitializations) { + final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE); + assert snapshotsInProgress != null; + final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); + assert entry != null; + endSnapshot(entry); } } }); @@ -525,7 +542,7 @@ public void onFailure(Exception e) { cleanupAfterError(e); } - public void onNoLongerMaster(String source) { + public void onNoLongerMaster() { userCreateSnapshotListener.onFailure(e); } @@ -552,7 +569,7 @@ private void cleanupAfterError(Exception exception) { } - private SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { + private static SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { return new SnapshotInfo(entry.snapshot().getSnapshotId(), entry.indices().stream().map(IndexId::getName).collect(Collectors.toList()), entry.startTime(), entry.includeGlobalState()); @@ -610,7 +627,7 @@ public List currentSnapshots(final String repository, builder.add(entry); } } - return Collections.unmodifiableList(builder); + return unmodifiableList(builder); } /** @@ -666,7 +683,7 @@ public Map snapshotShards(final String reposi return unmodifiableMap(shardStatus); } - private SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { + private static SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { return shardFailure; @@ -680,14 +697,28 @@ public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { // We don't remove old master when master flips anymore. So, we need to check for change in master - if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) { - processSnapshotsOnRemovedNodes(event); + final SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); + final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; + if (snapshotsInProgress != null) { + if (newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes())) { + processSnapshotsOnRemovedNodes(); + } + if (event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event)) { + processStartedShards(); + } + // Cleanup all snapshots that have no more work left: + // 1. Completed snapshots + // 2. Snapshots in state INIT that the previous master failed to start + // 3. 
Snapshots in any other state that have all their shard tasks completed + snapshotsInProgress.entries().stream().filter( + entry -> entry.state().completed() + || initializingSnapshots.contains(entry.snapshot()) == false + && (entry.state() == State.INIT || completed(entry.shards().values())) + ).forEach(this::endSnapshot); } - if (event.routingTableChanged()) { - processStartedShards(event); + if (newMaster) { + finalizeSnapshotDeletionFromPreviousMaster(event); } - removeFinishedSnapshotFromClusterState(event); - finalizeSnapshotDeletionFromPreviousMaster(event); } } catch (Exception e) { logger.warn("Failed to update snapshot state ", e); @@ -706,166 +737,134 @@ public void applyClusterState(ClusterChangedEvent event) { * snapshot was deleted and a call to GET snapshots would reveal that the snapshot no longer exists. */ private void finalizeSnapshotDeletionFromPreviousMaster(ClusterChangedEvent event) { - if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) { - SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; - SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); - deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); - } + SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; + SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); + deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); } } /** - * Removes a finished snapshot from the cluster state. This can happen if the previous - * master node processed a cluster state update that marked the snapshot as finished, - * but the previous master node died before removing the snapshot in progress from the - * cluster state. It is then the responsibility of the new master node to end the - * snapshot and remove it from the cluster state. 
+ * Cleans up shard snapshots that were running on removed nodes */ - private void removeFinishedSnapshotFromClusterState(ClusterChangedEvent event) { - if (event.localNodeMaster() && !event.previousState().nodes().isLocalNodeElectedMaster()) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - if (snapshotsInProgress != null && !snapshotsInProgress.entries().isEmpty()) { - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - if (entry.state().completed()) { - endSnapshot(entry); + private void processSnapshotsOnRemovedNodes() { + clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + DiscoveryNodes nodes = currentState.nodes(); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots == null) { + return currentState; + } + boolean changed = false; + ArrayList entries = new ArrayList<>(); + for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { + SnapshotsInProgress.Entry updatedSnapshot = snapshot; + if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { + ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); + boolean snapshotChanged = false; + for (ObjectObjectCursor shardEntry : snapshot.shards()) { + ShardSnapshotStatus shardStatus = shardEntry.value; + if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { + if (nodes.nodeExists(shardStatus.nodeId())) { + shards.put(shardEntry.key, shardEntry.value); + } else { + // TODO: Restart snapshot on another node? + snapshotChanged = true; + logger.warn("failing snapshot of shard [{}] on closed node [{}]", + shardEntry.key, shardStatus.nodeId()); + shards.put(shardEntry.key, + new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); + } + } + } + if (snapshotChanged) { + changed = true; + ImmutableOpenMap shardsMap = shards.build(); + if (!snapshot.state().completed() && completed(shardsMap.values())) { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shardsMap); + } else { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, snapshot.state(), shardsMap); + } + } + entries.add(updatedSnapshot); + } else if (snapshot.state() == State.INIT && initializingSnapshots.contains(snapshot.snapshot()) == false) { + changed = true; + // Mark the snapshot as aborted as it failed to start from the previous master + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); + entries.add(updatedSnapshot); + + // Clean up the snapshot that failed to start from the old master + deleteSnapshot(snapshot.snapshot(), new ActionListener() { + @Override + public void onResponse(Void aVoid) { + logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); + } + }, updatedSnapshot.getRepositoryStateId(), false); } } + if (changed) { + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); + } + return currentState; } - } + + @Override + public void onFailure(String source, Exception e) { + logger.warn("failed to update snapshot state after node removal"); + } + }); } - /** - * Cleans up shard snapshots that were running on removed 
nodes - * - * @param event cluster changed event - */ - private void processSnapshotsOnRemovedNodes(ClusterChangedEvent event) { - if (removedNodesCleanupNeeded(event)) { - // Check if we just became the master - final boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); - clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - DiscoveryNodes nodes = currentState.nodes(); - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots == null) { - return currentState; - } + private void processStartedShards() { + clusterService.submitStateUpdateTask("update snapshot state after shards started", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + RoutingTable routingTable = currentState.routingTable(); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots != null) { boolean changed = false; ArrayList entries = new ArrayList<>(); for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { SnapshotsInProgress.Entry updatedSnapshot = snapshot; - boolean snapshotChanged = false; - if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { - ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - for (ObjectObjectCursor shardEntry : snapshot.shards()) { - ShardSnapshotStatus shardStatus = shardEntry.value; - if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { - if (nodes.nodeExists(shardStatus.nodeId())) { - shards.put(shardEntry.key, shardEntry.value); - } else { - // TODO: Restart snapshot on another node? 
- snapshotChanged = true; - logger.warn("failing snapshot of shard [{}] on closed node [{}]", - shardEntry.key, shardStatus.nodeId()); - shards.put(shardEntry.key, new ShardSnapshotStatus(shardStatus.nodeId(), - State.FAILED, "node shutdown")); - } - } - } - if (snapshotChanged) { + if (snapshot.state() == State.STARTED) { + ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), + routingTable); + if (shards != null) { changed = true; - ImmutableOpenMap shardsMap = shards.build(); - if (!snapshot.state().completed() && completed(shardsMap.values())) { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shardsMap); - endSnapshot(updatedSnapshot); + if (!snapshot.state().completed() && completed(shards.values())) { + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shards); } else { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, snapshot.state(), shardsMap); + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, shards); } } entries.add(updatedSnapshot); - } else if (snapshot.state() == State.INIT && newMaster) { - changed = true; - // Mark the snapshot as aborted as it failed to start from the previous master - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); - entries.add(updatedSnapshot); - - // Clean up the snapshot that failed to start from the old master - deleteSnapshot(snapshot.snapshot(), new ActionListener() { - @Override - public void onResponse(Void aVoid) { - logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); - } - - @Override - public void onFailure(Exception e) { - logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); - } - }, updatedSnapshot.getRepositoryStateId(), false); } } if (changed) { - snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } - return currentState; - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn("failed to update snapshot state after node removal"); - } - }); - } - } - - private void processStartedShards(ClusterChangedEvent event) { - if (waitingShardsStartedOrUnassigned(event)) { - clusterService.submitStateUpdateTask("update snapshot state after shards started", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - RoutingTable routingTable = currentState.routingTable(); - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots != null) { - boolean changed = false; - ArrayList entries = new ArrayList<>(); - for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { - SnapshotsInProgress.Entry updatedSnapshot = snapshot; - if (snapshot.state() == State.STARTED) { - ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), - routingTable); - if (shards != null) { - changed = true; - if (!snapshot.state().completed() && completed(shards.values())) { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.SUCCESS, shards); - endSnapshot(updatedSnapshot); - } else { - updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, shards); - } - } - entries.add(updatedSnapshot); - } - } - if (changed) { - snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return 
ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); } - return currentState; } + return currentState; + } - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> - new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); - } - }); - } + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> + new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + } + }); } - private ImmutableOpenMap processWaitingShards( + private static ImmutableOpenMap processWaitingShards( ImmutableOpenMap snapshotShards, RoutingTable routingTable) { boolean snapshotChanged = false; ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); @@ -905,19 +904,16 @@ private ImmutableOpenMap processWaitingShards( } } - private boolean waitingShardsStartedOrUnassigned(ClusterChangedEvent event) { - SnapshotsInProgress curr = event.state().custom(SnapshotsInProgress.TYPE); - if (curr != null) { - for (SnapshotsInProgress.Entry entry : curr.entries()) { - if (entry.state() == State.STARTED && !entry.waitingIndices().isEmpty()) { - for (ObjectCursor index : entry.waitingIndices().keys()) { - if (event.indexRoutingTableChanged(index.value)) { - IndexRoutingTable indexShardRoutingTable = event.state().getRoutingTable().index(index.value); - for (ShardId shardId : entry.waitingIndices().get(index.value)) { - ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.id()).primaryShard(); - if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { - return true; - } + private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snapshotsInProgress, ClusterChangedEvent event) { + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + if (entry.state() == State.STARTED) { + for (ObjectCursor index : entry.waitingIndices().keys()) { + if (event.indexRoutingTableChanged(index.value)) { + IndexRoutingTable indexShardRoutingTable = event.state().getRoutingTable().index(index.value); + for (ShardId shardId : entry.waitingIndices().get(index.value)) { + ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.id()).primaryShard(); + if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { + return true; } } } @@ -927,28 +923,12 @@ private boolean waitingShardsStartedOrUnassigned(ClusterChangedEvent event) { return false; } - private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - if (snapshotsInProgress == null) { - return false; - } - // Check if we just became the master - boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); - for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { - if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) { - // We just replaced old master and snapshots in intermediate states needs to be cleaned - return true; - } - for (DiscoveryNode node : event.nodesDelta().removedNodes()) { - for (ObjectCursor shardStatus : snapshot.shards().values()) { - if (!shardStatus.value.state().completed() && node.getId().equals(shardStatus.value.nodeId())) { - // At least one shard was running on the 
removed node - we need to fail it - return true; - } - } - } - } - return false; + private static boolean removedNodesCleanupNeeded(SnapshotsInProgress snapshotsInProgress, List removedNodes) { + // If at least one shard was running on a removed node - we need to fail it + return removedNodes.isEmpty() == false && snapshotsInProgress.entries().stream().flatMap(snapshot -> + StreamSupport.stream(((Iterable) () -> snapshot.shards().valuesIt()).spliterator(), false) + .filter(s -> s.state().completed() == false).map(ShardSnapshotStatus::nodeId)) + .anyMatch(removedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())::contains); } /** @@ -981,25 +961,16 @@ private Tuple, Set> indicesWithMissingShards( * * @param entry snapshot */ - void endSnapshot(final SnapshotsInProgress.Entry entry) { - endSnapshot(entry, null); - } - - - /** - * Finalizes the shard in repository and then removes it from cluster state - *

    - * This is non-blocking method that runs on a thread from SNAPSHOT thread pool - * - * @param entry snapshot - * @param failure failure reason or null if snapshot was successful - */ - private void endSnapshot(final SnapshotsInProgress.Entry entry, final String failure) { + private void endSnapshot(final SnapshotsInProgress.Entry entry) { + if (endingSnapshots.add(entry.snapshot()) == false) { + return; + } threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { @Override protected void doRun() { final Snapshot snapshot = entry.snapshot(); final Repository repository = repositoriesService.repository(snapshot.getRepository()); + final String failure = entry.failure(); logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), failure); ArrayList shardFailures = new ArrayList<>(); for (ObjectObjectCursor shardStatus : entry.shards()) { @@ -1015,7 +986,7 @@ protected void doRun() { entry.startTime(), failure, entry.shards().size(), - Collections.unmodifiableList(shardFailures), + unmodifiableList(shardFailures), entry.getRepositoryStateId(), entry.includeGlobalState()); removeSnapshotFromClusterState(snapshot, snapshotInfo, null); @@ -1033,7 +1004,7 @@ public void onFailure(final Exception e) { /** * Removes record of running snapshot from cluster state - * @param snapshot snapshot + * @param snapshot snapshot * @param snapshotInfo snapshot info if snapshot was successful * @param e exception if snapshot failed */ @@ -1043,11 +1014,11 @@ private void removeSnapshotFromClusterState(final Snapshot snapshot, final Snaps /** * Removes record of running snapshot from cluster state and notifies the listener when this action is complete - * @param snapshot snapshot + * @param snapshot snapshot * @param failure exception if snapshot failed * @param listener listener to notify when snapshot information is removed from the cluster state */ - private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure, + private void removeSnapshotFromClusterState(final Snapshot snapshot, @Nullable SnapshotInfo snapshotInfo, final Exception failure, @Nullable CleanupAfterErrorListener listener) { clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() { @@ -1065,8 +1036,8 @@ public ClusterState execute(ClusterState currentState) { } } if (changed) { - snapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(unmodifiableList(entries))).build(); } } return currentState; @@ -1075,6 +1046,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); + endingSnapshots.remove(snapshot); if (listener != null) { listener.onFailure(e); } @@ -1082,8 +1054,9 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { + endingSnapshots.remove(snapshot); if (listener != null) { - listener.onNoLongerMaster(source); + listener.onNoLongerMaster(); } } @@ -1101,6 +1074,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS logger.warn("Failed to notify listeners", e); } } + 
endingSnapshots.remove(snapshot); if (listener != null) { listener.onResponse(snapshotInfo); } @@ -1131,14 +1105,20 @@ public void deleteSnapshot(final String repositoryName, final String snapshotNam .filter(s -> s.getName().equals(snapshotName)) .findFirst(); // if nothing found by the same name, then look in the cluster state for current in progress snapshots + long repoGenId = repositoryData.getGenId(); if (matchedEntry.isPresent() == false) { - matchedEntry = currentSnapshots(repositoryName, Collections.emptyList()).stream() - .map(e -> e.snapshot().getSnapshotId()).filter(s -> s.getName().equals(snapshotName)).findFirst(); + Optional matchedInProgress = currentSnapshots(repositoryName, Collections.emptyList()).stream() + .filter(s -> s.snapshot().getSnapshotId().getName().equals(snapshotName)).findFirst(); + if (matchedInProgress.isPresent()) { + matchedEntry = matchedInProgress.map(s -> s.snapshot().getSnapshotId()); + // Derive repository generation if a snapshot is in progress because it will increment the generation when it finishes + repoGenId = matchedInProgress.get().getRepositoryStateId() + 1L; + } } if (matchedEntry.isPresent() == false) { throw new SnapshotMissingException(repositoryName, snapshotName); } - deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repositoryData.getGenId(), immediatePriority); + deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repoGenId, immediatePriority); } /** @@ -1201,10 +1181,12 @@ public ClusterState execute(ClusterState currentState) throws Exception { final ImmutableOpenMap shards; final State state = snapshotEntry.state(); + final String failure; if (state == State.INIT) { // snapshot is still initializing, mark it as aborted shards = snapshotEntry.shards(); - + assert shards.isEmpty(); + failure = "Snapshot was aborted during initialization"; } else if (state == State.STARTED) { // snapshot is started - mark every non completed shard as aborted final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); @@ -1216,7 +1198,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { shardsBuilder.put(shardEntry.key, status); } shards = shardsBuilder.build(); - + failure = "Snapshot was aborted by deletion"; } else { boolean hasUncompletedShards = false; // Cleanup in case a node gone missing and snapshot wasn't updated for some reason @@ -1237,10 +1219,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { // where we force to finish the snapshot logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); shards = snapshotEntry.shards(); - endSnapshot(snapshotEntry); } + failure = snapshotEntry.failure(); } - SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards); + SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards, failure); clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(newSnapshot)); } return clusterStateBuilder.build(); @@ -1391,7 +1373,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS * @param indices list of indices to be snapshotted * @return list of shard to be included into current snapshot */ - private ImmutableOpenMap shards(ClusterState clusterState, List indices) { + private static ImmutableOpenMap shards(ClusterState clusterState, + List indices) { ImmutableOpenMap.Builder builder = 
ImmutableOpenMap.builder(); MetaData metaData = clusterState.metaData(); for (IndexId index : indices) { @@ -1416,8 +1399,6 @@ private ImmutableOpenMap shard builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated")); } else if (primary.relocating() || primary.initializing()) { - // The WAITING state was introduced in V1.2.0 - - // don't use it if there are nodes with older version in the cluster builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING)); } else if (!primary.started()) { builder.put(shardId, From a506f4746f7def5a033e71398f2e5d391d08e7cb Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 4 Mar 2019 13:11:38 +0100 Subject: [PATCH 20/39] Update release notes for 6.7.0 --- docs/reference/release-notes/6.7.asciidoc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/reference/release-notes/6.7.asciidoc b/docs/reference/release-notes/6.7.asciidoc index a9a1310f57818..21bd757348673 100644 --- a/docs/reference/release-notes/6.7.asciidoc +++ b/docs/reference/release-notes/6.7.asciidoc @@ -101,6 +101,9 @@ Authorization:: CCR:: * Add ccr follow info api {pull}37408[#37408] (issue: {issue}37127[#37127]) +CRUD:: +* Make `_doc` work as an alias of the actual type of an index. {pull}39505[#39505] (issue: {issue}39469[#39469]) + Features/ILM:: * [ILM] Add unfollow action {pull}36970[#36970] (issue: {issue}34648[#34648]) @@ -260,6 +263,7 @@ Infra/Scripting:: * Add getZone to JodaCompatibleZonedDateTime {pull}37084[#37084] Infra/Settings:: +* Provide a clearer error message on keystore add {pull}39327[#39327] (issue: {issue}39324[#39324]) * Separate out validation of groups of settings {pull}34184[#34184] License:: @@ -298,6 +302,7 @@ Rollup:: * Replace the TreeMap in the composite aggregation {pull}36675[#36675] SQL:: +* SQL: Enhance checks for inexact fields {pull}39427[#39427] (issue: {issue}38501[#38501]) * SQL: change the default precision for CURRENT_TIMESTAMP function {pull}39391[#39391] (issue: {issue}39288[#39288]) * SQL: add "validate.properties" property to JDBC's allowed list of settings {pull}39050[#39050] (issue: {issue}38068[#38068]) * SQL: Allow look-ahead resolution of aliases for WHERE clause {pull}38450[#38450] (issue: {issue}29983[#29983]) @@ -456,6 +461,7 @@ Geo:: * Geo: Do not normalize the longitude with value -180 for Lucene shapes {pull}37299[#37299] (issue: {issue}37297[#37297]) Infra/Core:: +* Correct name of basic_date_time_no_millis {pull}39367[#39367] * Fix DateFormatters.parseMillis when no timezone is given {pull}39100[#39100] (issue: {issue}39067[#39067]) * Prefix java formatter patterns with '8' {pull}38712[#38712] (issue: {issue}38567[#38567]) * Bubble-up exceptions from scheduler {pull}38317[#38317] (issue: {issue}38014[#38014]) @@ -508,6 +514,10 @@ Recovery:: * RecoveryMonitor#lastSeenAccessTime should be volatile {pull}36781[#36781] SQL:: +* SQL: Fix merging of incompatible multi-fields {pull}39560[#39560] (issue: {issue}39547[#39547]) +* SQL: fix COUNT DISTINCT column name {pull}39537[#39537] (issue: {issue}39511[#39511]) +* SQL: ignore UNSUPPORTED fields for JDBC and ODBC modes in 'SYS COLUMNS' {pull}39518[#39518] (issue: {issue}39471[#39471]) +* SQL: Use underlying exact field for LIKE/RLIKE {pull}39443[#39443] (issue: {issue}39442[#39442]) * SQL: enforce JDBC driver - ES server version parity {pull}38972[#38972] (issue: {issue}38775[#38775]) * SQL: fall back to using the field name for column label 
{pull}38842[#38842] (issue: {issue}38831[#38831]) * SQL: Prevent grouping over grouping functions {pull}38649[#38649] (issue: {issue}38308[#38308]) @@ -531,6 +541,7 @@ SQL:: * SQL: Fix issue with always false filter involving functions {pull}36830[#36830] (issue: {issue}35980[#35980]) * SQL: protocol returns ISO 8601 String formatted dates instead of Long for JDBC/ODBC requests {pull}36800[#36800] (issue: {issue}36756[#36756]) * SQL: Enhance Verifier to prevent aggregate or grouping functions from {pull}36799[#36799] (issue: {issue}36798[#36798]) +* SQL: normalized keywords shouldn't be allowed for groupings and sorting [ISSUE] {pull}35203[#35203] Search:: * Fix simple query string serialization conditional {pull}38960[#38960] (issues: {issue}21504[#21504], {issue}38889[#38889]) @@ -546,7 +557,9 @@ Security:: * Fix potential NPE in UsersTool {pull}37660[#37660] Snapshot/Restore:: +* Fix Concurrent Snapshot Ending And Stabilize Snapshot Finalization {pull}38368[#38368] (issue: {issue}38226[#38226]) * Fix Two Races that Lead to Stuck Snapshots {pull}37686[#37686] (issues: {issue}32265[#32265], {issue}32348[#32348]) +* Fix Race in Concurrent Snapshot Delete and Create {pull}37612[#37612] (issue: {issue}37581[#37581]) * Streamline S3 Repository- and Client-Settings {pull}37393[#37393] * SNAPSHOTS: Upgrade GCS Dependencies to 1.55.0 {pull}36634[#36634] (issues: {issue}35229[#35229], {issue}35459[#35459]) From bc1884915da8e67419f074dd8b6b151a93855365 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Mon, 4 Mar 2019 15:29:10 +0100 Subject: [PATCH 21/39] SQL: Don't allow inexact fields for MIN/MAX (#39563) MIN/MAX on strings are supported and are implemented with TopAggs FIRST/LAST respectively, but they cannot operate on `text` fields without underlying `keyword` fields => inexact. 
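In sketch form, the resolution change applied to both MIN and MAX (condensed
from the diff below; `isExact` comes from TypeResolutions, as shown in the
imports):

    @Override
    protected TypeResolution resolveType() {
        if (field().dataType().isString()) {
            // a text field is rejected unless a keyword sub-field provides exact matching
            return isExact(field(), sourceText(), ParamOrdinal.DEFAULT);
        } else {
            return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT);
        }
    }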
Follows: #39427 --- .../xpack/sql/expression/function/aggregate/Max.java | 3 ++- .../xpack/sql/expression/function/aggregate/Min.java | 3 ++- .../analyzer/VerifierErrorMessagesTests.java | 12 ++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java index cd03ea85e4558..5827083343a0f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -13,6 +13,7 @@ import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isExact; import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; /** @@ -47,7 +48,7 @@ public String innerName() { @Override protected TypeResolution resolveType() { if (field().dataType().isString()) { - return TypeResolution.TYPE_RESOLVED; + return isExact(field(), sourceText(), ParamOrdinal.DEFAULT); } else { return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java index 07fa44769b2db..e64774fe8e720 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java @@ -13,6 +13,7 @@ import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isExact; import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; /** @@ -50,7 +51,7 @@ public String innerName() { @Override protected TypeResolution resolveType() { if (field().dataType().isString()) { - return TypeResolution.TYPE_RESOLVED; + return isExact(field(), sourceText(), ParamOrdinal.DEFAULT); } else { return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index dfeb44dfe2165..3c19b84ac4e3a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -717,6 +717,18 @@ public void testTopHitsGroupByHavingUnsupported() { error("SELECT FIRST(int) FROM test GROUP BY text HAVING FIRST(int) > 10")); } + public void testMinOnInexactUnsupported() { + assertEquals("1:8: [MIN(text)] cannot operate on field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT MIN(text) FROM test")); + } + + public void testMaxOnInexactUnsupported() { + assertEquals("1:8: [MAX(text)] cannot operate on field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT MAX(text) FROM test")); + } + public void testMinOnKeywordGroupByHavingUnsupported() { assertEquals("1:52: HAVING filter is unsupported for 
function [MIN(keyword)]", error("SELECT MIN(keyword) FROM test GROUP BY text HAVING MIN(keyword) > 10")); From 46392f2d3060a3f860317a94e5706ddd99bd51f3 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 4 Mar 2019 16:53:05 +0100 Subject: [PATCH 22/39] mute test --- .../java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index 8661a6fb249b7..faa7dc787869d 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -208,6 +208,7 @@ public void testAutoFollowing() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39355") public void testCannotFollowLeaderInUpgradedCluster() throws Exception { assumeTrue("Tests only runs with upgrade_state [all]", upgradeState == UpgradeState.ALL); assumeTrue("Put follow api does not restore from ccr repository before 6.7.0", From e1fb408e65a6ffd0582ad83427060d2ba5313175 Mon Sep 17 00:00:00 2001 From: Prabhakar S Date: Mon, 4 Mar 2019 23:04:09 +0530 Subject: [PATCH 23/39] Fixing the custom object serialization bug in diffable utils. (#39544) While serializing custom objects, the length of the list is computed after filtering out the unsupported objects but while writing objects the filter is not applied thus resulting in writing unsupported objects which will fail to deserialize by the receiever. Adding the condition to filter out unsupported custom objects. 
--- .../elasticsearch/cluster/DiffableUtils.java | 6 +- .../ClusterSerializationTests.java | 144 ++++++++++++++++++ 2 files changed, 148 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index 78eceeb12bcca..725da675952f2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -474,8 +474,10 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeVInt(upsertsCount); for (Map.Entry entry : upserts.entrySet()) { - keySerializer.writeKey(entry.getKey(), out); - valueSerializer.write(entry.getValue(), out); + if(valueSerializer.supportsVersion(entry.getValue(), version)) { + keySerializer.writeKey(entry.getKey(), out); + valueSerializer.write(entry.getValue(), out); + } } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 9ae3ba3fb033a..645e4725881ba 100644 --- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -20,11 +20,14 @@ package org.elasticsearch.cluster.serialization; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -39,7 +42,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.VersionUtils; @@ -47,6 +52,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -205,4 +211,142 @@ public void testObjectReuseWhenApplyingClusterStateDiff() throws Exception { assertSame("template", serializedClusterState2.metaData().templates().get("test-template"), serializedClusterState3.metaData().templates().get("test-template")); } + + public static class TestCustomOne extends AbstractNamedDiffable implements Custom { + + public static final String TYPE = "test_custom_one"; + private final String strObject; + + public TestCustomOne(String strObject) { + this.strObject = strObject; + } + + public TestCustomOne(StreamInput in) throws IOException { + this.strObject = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(strObject); + } + + @Override + public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("custom_string_object", strObject); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + } + + public static class TestCustomTwo extends AbstractNamedDiffable implements Custom { + + public static final String TYPE = "test_custom_two"; + private final Integer intObject; + + public TestCustomTwo(Integer intObject) { + this.intObject = intObject; + } + + public TestCustomTwo(StreamInput in) throws IOException { + this.intObject = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(intObject); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("custom_integer_object", intObject); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + } + + public void testCustomSerialization() throws Exception { + ClusterState.Builder builder = ClusterState.builder(ClusterState.EMPTY_STATE) + .putCustom(TestCustomOne.TYPE, new TestCustomOne("test_custom_one")) + .putCustom(TestCustomTwo.TYPE, new TestCustomTwo(10)); + + ClusterState clusterState = builder.incrementVersion().build(); + + Diff diffs = clusterState.diff(ClusterState.EMPTY_STATE); + + // Add the new customs to named writeables + final List entries = ClusterModule.getNamedWriteables(); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestCustomOne.TYPE, TestCustomOne::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestCustomOne.TYPE, TestCustomOne::readDiffFrom)); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestCustomTwo.TYPE, TestCustomTwo::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestCustomTwo.TYPE, TestCustomTwo::readDiffFrom)); + + // serialize with current version + BytesStreamOutput outStream = new BytesStreamOutput(); + Version version = Version.CURRENT; + outStream.setVersion(version); + diffs.writeTo(outStream); + StreamInput inStream = outStream.bytes().streamInput(); + + inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(entries)); + inStream.setVersion(version); + Diff serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); + + // Current version - Both the customs are non null + assertThat(stateAfterDiffs.custom(TestCustomOne.TYPE), notNullValue()); + assertThat(stateAfterDiffs.custom(TestCustomTwo.TYPE), notNullValue()); + + // serialize with minimum compatibile version + outStream = new BytesStreamOutput(); + version = Version.CURRENT.minimumCompatibilityVersion(); + outStream.setVersion(version); + diffs.writeTo(outStream); + inStream = outStream.bytes().streamInput(); + + inStream = new 
NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(entries)); + inStream.setVersion(version); + serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); + + // Old version - TestCustomOne is null and TestCustomTwo is not null + assertThat(stateAfterDiffs.custom(TestCustomOne.TYPE), nullValue()); + assertThat(stateAfterDiffs.custom(TestCustomTwo.TYPE), notNullValue()); + } + } From f86f1eef4389fef18fedbb012037e97519bea551 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 4 Mar 2019 12:46:22 -0800 Subject: [PATCH 24/39] [DOCS] Updates API in Watcher transform context (#39540) --- .../painless-contexts/painless-watcher-context-example.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc index 2d2e3993e3233..fa78b4855f210 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc @@ -104,7 +104,7 @@ The following example shows the use of metadata and transforming dates into a re [source,Painless] ---- -POST _xpack/watcher/watch/_execute +POST _watcher/watch/_execute { "watch" : { "metadata" : { "min_hits": 10000 }, From 0799ff1b0680482a3aaedf86376d8e02a0e86d19 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 4 Mar 2019 08:59:02 -0500 Subject: [PATCH 25/39] Assert recovery done in testDoNotWaitForPendingSeqNo (#39595) Since #39006 we should be able to complete a peer-recovery without waiting for pending indexing operations. Thus, the assertion in testDoNotWaitForPendingSeqNo should be updated from false to true. 
Closes #39510 --- .../RecoveryDuringReplicationTests.java | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 2af4249c907a0..c5ed6ffcf08d8 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -439,7 +439,6 @@ public void testResyncAfterPrimaryPromotion() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39510") public void testDoNotWaitForPendingSeqNo() throws Exception { IndexMetaData metaData = buildIndexMetaData(1); @@ -490,20 +489,14 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); CountDownLatch recoveryStart = new CountDownLatch(1); - AtomicBoolean opsSent = new AtomicBoolean(false); + AtomicBoolean recoveryDone = new AtomicBoolean(false); final Future recoveryFuture = shards.asyncRecoverReplica(newReplica, (indexShard, node) -> { recoveryStart.countDown(); return new RecoveryTarget(indexShard, node, recoveryListener, l -> {}) { @Override - public void indexTranslogOperations( - final List operations, - final int totalTranslogOps, - final long maxSeenAutoIdTimestamp, - final long msu, - final RetentionLeases retentionLeases, - final ActionListener listener) { - opsSent.set(true); - super.indexTranslogOperations(operations, totalTranslogOps, maxSeenAutoIdTimestamp, msu, retentionLeases, listener); + public void finalizeRecovery(long globalCheckpoint, ActionListener listener) { + recoveryDone.set(true); + super.finalizeRecovery(globalCheckpoint, listener); } }; }); @@ -514,7 +507,7 @@ public void indexTranslogOperations( final int indexedDuringRecovery = shards.indexDocs(randomInt(5)); docs += indexedDuringRecovery; - assertBusy(() -> assertFalse("recovery should not wait for on pending docs", opsSent.get())); + assertBusy(() -> assertTrue("recovery should not wait for on pending docs", recoveryDone.get())); primaryEngineFactory.releaseLatchedIndexers(); pendingDocsDone.await(); From 63eb0bb6e2e0c10b3a1bdcd1927b96c3459ec17e Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Mon, 4 Mar 2019 15:37:54 -0700 Subject: [PATCH 26/39] Check for .watches that wasn't upgraded properly (#39609) If the `.watches` index was created in 5.6 and is still a concrete index, rather than an alias for `.watches-6`, that means that it was not properly upgraded with the Migration Upgrade API before upgrading to Elasticsearch 6.x. In this case, calling the Migration Upgrade API will resolve the problem with the `.watches` index. This isn't going to be a common case, as Watcher will not run properly in this situation, but we should handle it and notify the user of the correct action, just in case. 
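For readers who hit this new deprecation, the remediation the message points at is a single call to the Migration Upgrade API. A sketch of what that request might look like on a 6.x cluster follows; the `_xpack/migration` path is an assumption based on the documentation URL cited in the check, so consult that page for the exact form:

[source,js]
----
# Hypothetical remediation call (verify the endpoint against the cited docs):
# upgrades the concrete .watches index in place so that Watcher can run
# and the index can be carried forward into 7.0.
POST /_xpack/migration/upgrade/.watches
----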
--- .../deprecation/IndexDeprecationChecks.java | 6 ++++++ .../IndexDeprecationChecksTests.java | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index e30a9e4a985f4..1e9585f614988 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -112,6 +112,12 @@ static DeprecationIssue oldIndicesCheck(IndexMetaData indexMetaData) { "The .tasks index was created before version 6.0 and cannot be opened in 7.0. " + "You must delete this index and allow it to be re-created by Elasticsearch. If you wish to preserve task history, "+ "reindex this index to a new index before deleting it."); + } else if (".watches".equals(indexMetaData.getIndex().getName())) { + return new DeprecationIssue(DeprecationIssue.Level.CRITICAL, + ".watches was not properly upgraded before upgrading to Elasticsearch 6", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-upgrade.html", + "The .watches index was created before version 6.0, and was not properly upgraded in 5.6. " + + "Please upgrade this index using the Migration Upgrade API."); } if ((mappingCount == 2 && !hasDefaultMapping) || mappingCount > 2) { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 2543b6dfb9f12..c39f2a2bad153 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -63,6 +63,23 @@ public void testOldTasksIndexCheck() { assertEquals(singletonList(expected), issues); } + public void testUnupgradedWatcherIndexCheck() { + Version createdWith = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + VersionUtils.getPreviousVersion(Version.V_6_0_0)); + IndexMetaData indexMetaData = IndexMetaData.builder(".watches") + .settings(settings(createdWith)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.CRITICAL, + ".watches was not properly upgraded before upgrading to Elasticsearch 6", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-upgrade.html", + "The .watches index was created before version 6.0, and was not properly upgraded in 5.6. 
" + + "Please upgrade this index using the Migration Upgrade API."); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); + assertEquals(singletonList(expected), issues); + } + public void testMultipleTypesCheckWithDefaultMapping() throws IOException { String mappingName1 = randomAlphaOfLengthBetween(2, 5); String mappingJson1 = "{\n" + From fc76bcbfe0a3c3874271f9d8f3443187e91f6941 Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 4 Mar 2019 15:06:00 -0800 Subject: [PATCH 27/39] [DOCS] Sorts security APIs --- x-pack/docs/en/rest-api/security.asciidoc | 6 +++--- x-pack/docs/en/rest-api/security/create-api-keys.asciidoc | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index c59c44312ae60..7e14a6a0ee9b7 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -81,6 +81,7 @@ include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] include::security/clear-roles-cache.asciidoc[] +include::security/create-api-keys.asciidoc[] include::security/put-app-privileges.asciidoc[] include::security/create-role-mappings.asciidoc[] include::security/create-roles.asciidoc[] @@ -91,14 +92,13 @@ include::security/delete-roles.asciidoc[] include::security/delete-users.asciidoc[] include::security/disable-users.asciidoc[] include::security/enable-users.asciidoc[] +include::security/get-api-keys.asciidoc[] include::security/get-app-privileges.asciidoc[] include::security/get-role-mappings.asciidoc[] include::security/get-roles.asciidoc[] include::security/get-tokens.asciidoc[] include::security/get-users.asciidoc[] include::security/has-privileges.asciidoc[] +include::security/invalidate-api-keys.asciidoc[] include::security/invalidate-tokens.asciidoc[] include::security/ssl.asciidoc[] -include::security/create-api-keys.asciidoc[] -include::security/invalidate-api-keys.asciidoc[] -include::security/get-api-keys.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc index 741a9d79feaf0..a55111c82afee 100644 --- a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-create-api-key]] === Create API Key API +++++ +Create API keys +++++ Creates an API key for access without requiring basic authentication. From a0c0d2807d4013ced53360be297bb5de0b022082 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 5 Mar 2019 12:13:25 +0200 Subject: [PATCH 28/39] Fix security index auto-create and state recovery race (#39582) Previously, the security index could be wrongfully recreated. This might happen if the index was interpreted as missing, as in the case of a fresh install, but the index existed and the state did not yet recover. This fix will return HTTP SERVICE_UNAVAILABLE (503) for requests that try to write to the security index before the state has not been recovered yet. 
--- .../support/SecurityIndexManager.java | 14 +++- .../support/SecurityIndexManagerTests.java | 73 +++++++++++++++++++ 2 files changed, 85 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index ac865165e0d22..d751ac538eaf2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -40,6 +41,7 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.template.TemplateUtils; @@ -81,7 +83,7 @@ public class SecurityIndexManager implements ClusterStateListener { private volatile State indexState; public SecurityIndexManager(Client client, String indexName, ClusterService clusterService) { - this(client, indexName, new State(false, false, false, false, null, null, null)); + this(client, indexName, State.UNRECOVERED_STATE); clusterService.addListener(this); } @@ -121,6 +123,10 @@ public boolean isMappingUpToDate() { return this.indexState.mappingUpToDate; } + public boolean isStateRecovered() { + return this.indexState != State.UNRECOVERED_STATE; + } + public ElasticsearchException getUnavailableReason() { final State localState = this.indexState; if (localState.indexAvailable) { @@ -297,7 +303,10 @@ public void checkIndexVersionThenExecute(final Consumer consumer, fin public void prepareIndexIfNeededThenExecute(final Consumer consumer, final Runnable andThen) { final State indexState = this.indexState; // use a local copy so all checks execute against the same state! // TODO we should improve this so we don't fire off a bunch of requests to do the same thing (create or update mappings) - if (indexState.indexExists && indexState.isIndexUpToDate == false) { + if (indexState == State.UNRECOVERED_STATE) { + consumer.accept(new ElasticsearchStatusException("Cluster state has not been recovered yet, cannot write to the security index", + RestStatus.SERVICE_UNAVAILABLE)); + } else if (indexState.indexExists && indexState.isIndexUpToDate == false) { consumer.accept(new IllegalStateException( "Security index is not on the current version. Security features relying on the index will not be available until " + "the upgrade API is run on the security index")); @@ -377,6 +386,7 @@ public static boolean isIndexDeleted(State previousState, State currentState) { * State of the security index. 
*/ public static class State { + public static final State UNRECOVERED_STATE = new State(false, false, false, false, null, null, null); public final boolean indexExists; public final boolean isIndexUpToDate; public final boolean indexAvailable; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 459ffeb6a6037..1a5ae40144ab9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -15,6 +15,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; @@ -27,6 +28,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -41,10 +43,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.security.test.SecurityTestUtils; @@ -56,6 +61,10 @@ import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_TEMPLATE_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -73,6 +82,7 @@ public void setUpManager() { final Client mockClient = mock(Client.class); final ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(threadPool.generic()).thenReturn(EsExecutors.newDirectExecutorService()); when(mockClient.threadPool()).thenReturn(threadPool); when(mockClient.settings()).thenReturn(Settings.EMPTY); final ClusterService clusterService = mock(ClusterService.class); @@ -196,6 +206,67 @@ public void testIndexHealthChangeListeners() throws Exception { assertEquals(ClusterHealthStatus.GREEN, currentState.get().indexStatus); } + public void testWriteBeforeStateNotRecovered() throws Exception { + final AtomicBoolean prepareRunnableCalled = new AtomicBoolean(false); + final AtomicReference prepareException = new AtomicReference<>(null); + 
manager.prepareIndexIfNeededThenExecute(ex -> { + prepareException.set(ex); + }, () -> { + prepareRunnableCalled.set(true); + }); + assertThat(prepareException.get(), is(notNullValue())); + assertThat(prepareException.get(), instanceOf(ElasticsearchStatusException.class)); + assertThat(((ElasticsearchStatusException)prepareException.get()).status(), is(RestStatus.SERVICE_UNAVAILABLE)); + assertThat(prepareRunnableCalled.get(), is(false)); + prepareException.set(null); + prepareRunnableCalled.set(false); + // state not recovered + final ClusterBlocks.Builder blocks = ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK); + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME).blocks(blocks))); + manager.prepareIndexIfNeededThenExecute(ex -> { + prepareException.set(ex); + }, () -> { + prepareRunnableCalled.set(true); + }); + assertThat(prepareException.get(), is(notNullValue())); + assertThat(prepareException.get(), instanceOf(ElasticsearchStatusException.class)); + assertThat(((ElasticsearchStatusException)prepareException.get()).status(), is(RestStatus.SERVICE_UNAVAILABLE)); + assertThat(prepareRunnableCalled.get(), is(false)); + prepareException.set(null); + prepareRunnableCalled.set(false); + // state recovered with index + ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME, + SecurityIndexManager.INTERNAL_INDEX_FORMAT); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + manager.prepareIndexIfNeededThenExecute(ex -> { + prepareException.set(ex); + }, () -> { + prepareRunnableCalled.set(true); + }); + assertThat(prepareException.get(), is(nullValue())); + assertThat(prepareRunnableCalled.get(), is(true)); + } + + public void testListeneredNotCalledBeforeStateNotRecovered() throws Exception { + final AtomicBoolean listenerCalled = new AtomicBoolean(false); + manager.addIndexStateListener((prev, current) -> { + listenerCalled.set(true); + }); + final ClusterBlocks.Builder blocks = ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK); + // state not recovered + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME).blocks(blocks))); + assertThat(manager.isStateRecovered(), is(false)); + assertThat(listenerCalled.get(), is(false)); + // state recovered with index + ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME, + SecurityIndexManager.INTERNAL_INDEX_FORMAT); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + assertThat(manager.isStateRecovered(), is(true)); + assertThat(listenerCalled.get(), is(true)); + } + public void testIndexOutOfDateListeners() throws Exception { final AtomicBoolean listenerCalled = new AtomicBoolean(false); manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME))); @@ -240,12 +311,14 @@ private void assertInitialState() { assertThat(manager.indexExists(), Matchers.equalTo(false)); assertThat(manager.isAvailable(), Matchers.equalTo(false)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(false)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(false)); } private void assertIndexUpToDateButNotAvailable() { assertThat(manager.indexExists(), Matchers.equalTo(true)); assertThat(manager.isAvailable(), Matchers.equalTo(false)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(true)); } public static ClusterState.Builder 
createClusterState(String indexName, String templateName) throws IOException { From 2c10c934cdf8b2cb65414567d807a0866c513aa7 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 5 Mar 2019 13:05:50 +0200 Subject: [PATCH 29/39] Mute TokenAuthIntegTests.testExpiredTokensDeletedAfterExpiration (#39690) Relates #39581 --- .../elasticsearch/xpack/security/authc/TokenAuthIntegTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index b9d484b263125..fdbf70f3be618 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -132,6 +132,7 @@ public void testTokenServiceCanRotateKeys() throws Exception { } @TestLogging("org.elasticsearch.xpack.security.authc:DEBUG") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39581") public void testExpiredTokensDeletedAfterExpiration() throws Exception { final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, From be10934d0999655181465ab21d18e383ba550fe8 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 5 Mar 2019 13:26:21 +0200 Subject: [PATCH 30/39] Revert "unmute EvilLoggerTests#testDeprecatedSettings (#38743)" This reverts commit f2b4dd500312cb8bbb6b9d645bb9e3e650edeb2a. Relates #35990 --- .../java/org/elasticsearch/common/logging/EvilLoggerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 017cf9eecf85f..847338ede4871 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -253,6 +253,7 @@ public void testDeprecationLoggerMaybeLog() throws IOException, UserException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35990") public void testDeprecatedSettings() throws IOException, UserException { setupLogging("settings"); From 49ff38494d55503c0ea4081d3e5d6b5136f883fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 5 Mar 2019 14:30:10 +0100 Subject: [PATCH 31/39] Fix Fuzziness#asDistance(String) (#39643) Currently Fuzziness#asDistance(String) doesn't work for custom AUTO values. If the fuzziness is AUTO, the method returns the correct edit distance to use, depending on the input string, but for custom AUTO values it currently always returns an edit distance of 1. Correcting this and adding unit and integration tests to catch these cases. 
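To make the fixed semantics concrete: for a custom `AUTO:low,high` value, terms shorter than `low` characters must match exactly, terms of `low` up to `high - 1` characters allow one edit, and terms of `high` or more characters allow two edits. A minimal query mirroring the integration test added below (index and field names are placeholders); with `AUTO:5,7`, the four-character term `uniy` falls below the low cutoff, so only exact matches are returned:

[source,js]
----
GET /test/_search
{
  "query": {
    "match": {
      "text": {
        "query": "uniy",
        "fuzziness": "AUTO:5,7"
      }
    }
  }
}
----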
Closes #39614 --- .../elasticsearch/common/unit/Fuzziness.java | 2 +- .../common/unit/FuzzinessTests.java | 24 +++++++++++++++++ .../search/query/SearchQueryIT.java | 26 +++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index aee0e9cd02ada..834277b5c7282 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -186,7 +186,7 @@ public int asDistance() { } public int asDistance(String text) { - if (this.equals(AUTO)) { //AUTO + if (this.equals(AUTO) || isAutoWithCustomValues()) { //AUTO final int len = termLen(text); if (len < lowDistance) { return 0; diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java index 446d3c6eeceeb..da54687da0d71 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java @@ -167,4 +167,28 @@ private static Fuzziness doSerializeRoundtrip(Fuzziness in) throws IOException { StreamInput streamInput = output.bytes().streamInput(); return new Fuzziness(streamInput); } + + public void testAsDistanceString() { + Fuzziness fuzziness = Fuzziness.build("0"); + assertEquals(0, fuzziness.asDistance(randomAlphaOfLengthBetween(0, 10))); + fuzziness = Fuzziness.build("1"); + assertEquals(1, fuzziness.asDistance(randomAlphaOfLengthBetween(0, 10))); + fuzziness = Fuzziness.build("2"); + assertEquals(2, fuzziness.asDistance(randomAlphaOfLengthBetween(0, 10))); + + fuzziness = Fuzziness.build("AUTO"); + assertEquals(0, fuzziness.asDistance("")); + assertEquals(0, fuzziness.asDistance("ab")); + assertEquals(1, fuzziness.asDistance("abc")); + assertEquals(1, fuzziness.asDistance("abcde")); + assertEquals(2, fuzziness.asDistance("abcdef")); + + fuzziness = Fuzziness.build("AUTO:5,7"); + assertEquals(0, fuzziness.asDistance("")); + assertEquals(0, fuzziness.asDistance("abcd")); + assertEquals(1, fuzziness.asDistance("abcde")); + assertEquals(1, fuzziness.asDistance("abcdef")); + assertEquals(2, fuzziness.asDistance("abcdefg")); + + } } diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 17c97415a8a8f..bdc7d3462c519 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -722,6 +722,32 @@ public void testMatchQueryNumeric() throws Exception { expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); } + public void testMatchQueryFuzzy() throws Exception { + assertAcked(prepareCreate("test").addMapping("_doc", "text", "type=text")); + + indexRandom(true, client().prepareIndex("test", "_doc", "1").setSource("text", "Unit"), + client().prepareIndex("test", "_doc", "2").setSource("text", "Unity")); + + SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("0")).get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("1")).get(); + assertHitCount(searchResponse, 2L); + assertSearchHits(searchResponse, "1", "2"); + + 
searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("AUTO")).get(); + assertHitCount(searchResponse, 2L); + assertSearchHits(searchResponse, "1", "2"); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("AUTO:5,7")).get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "unify").fuzziness("AUTO:5,7")).get(); + assertHitCount(searchResponse, 1L); + assertSearchHits(searchResponse, "2"); + } + + public void testMultiMatchQuery() throws Exception { createIndex("test"); From 4615e72578d6ba478dda94e99148ae0c801db0ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Cifuentes=20Garc=C3=ADa?= <33845232+srensamblador@users.noreply.github.com> Date: Tue, 5 Mar 2019 16:17:01 +0100 Subject: [PATCH 32/39] Improved Terms Aggregation documentation (#38892) Added a note after the first query example talking about fielddata. --- docs/reference/aggregations/bucket/terms-aggregation.asciidoc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 188b2ed3774c0..0c29d67cff060 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -61,13 +61,15 @@ GET /_search { "aggs" : { "genres" : { - "terms" : { "field" : "genre" } + "terms" : { "field" : "genre" } <1> } } } -------------------------------------------------- // CONSOLE // TEST[s/_search/_search\?filter_path=aggregations/] +<1> `terms` aggregation should be a field of type `keyword` or any other data type suitable for bucket aggregations. In order to use it with `text` you will need to enable +<>. 
Response: From 47bcf999ab42f5b78919d6046e0370f81643d339 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Tue, 5 Mar 2019 18:12:24 +0100 Subject: [PATCH 33/39] Removed incorrect ML YAML tests (#39400) A client cannot know that a job_id is already taken, so this test should not have been specified as a client test --- .../rest-api-spec/test/ml/jobs_crud.yml | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index 049aa4da82b0a..1bac6b3d2ac49 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -236,24 +236,6 @@ "time_format":"yyyy-MM-dd HH:mm:ssX" } } - - do: - catch: param - xpack.ml.put_job: - job_id: jobs-crud-id-already-taken - body: > - { - "job_id":"jobs-crud-id-already-taken", - "description":"Analysis of response time by airline", - "analysis_config" : { - "bucket_span": "1h", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "data_description" : { - "field_delimiter":",", - "time_field":"time", - "time_format":"yyyy-MM-dd HH:mm:ssX" - } - } --- "Test update job": From 5f1df2f64f6a788a4b0ebb8f87254bd6b31be7f6 Mon Sep 17 00:00:00 2001 From: jimczi Date: Tue, 5 Mar 2019 18:18:45 +0100 Subject: [PATCH 34/39] fix typo in synonym graph filter docs --- .../analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index 2cdf51e51f230..fa228abd74ac0 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -174,7 +174,8 @@ PUT /test_index Using `synonyms_path` to define WordNet synonyms in a file is supported as well. -=== Parsing synonym files +[float] +==== Parsing synonym files Elasticsearch will use the token filters preceding the synonym filter in a tokenizer chain to parse the entries in a synonym file. So, for example, if a @@ -186,7 +187,7 @@ parsing synonyms, e.g. `asciifolding` will only produce the folded version of th token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. -WARNING:The synonym rules should not contain words that are removed by +WARNING: The synonym rules should not contain words that are removed by a filter that appears after in the chain (a `stop` filter for instance). Removing a term from a synonym rule breaks the matching at query time. From 5e6953a5e9a09664f03f943f8fa4a1fd1d69fc6e Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 5 Mar 2019 09:50:58 -0700 Subject: [PATCH 35/39] Add documentation on remote recovery (#39483) This is related to #35975. It adds documentation on the remote recovery process. Additionally, it adds documentation about the various settings that can impact the process. 
--- docs/reference/ccr/getting-started.asciidoc | 5 ++ docs/reference/ccr/index.asciidoc | 1 + docs/reference/ccr/remote-recovery.asciidoc | 29 +++++++++++ docs/reference/settings/ccr-settings.asciidoc | 52 +++++++++++++++++++ .../settings/configuring-xes.asciidoc | 3 +- 5 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 docs/reference/ccr/remote-recovery.asciidoc create mode 100644 docs/reference/settings/ccr-settings.asciidoc diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 9dca160893264..e0fd04ecf1731 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -261,6 +261,11 @@ PUT /server-metrics-copy/_ccr/follow?wait_for_active_shards=1 ////////////////////////// +The follower index is initialized using the <> +process. The remote recovery process transfers the existing Lucene segment files +from the leader to the follower. When the remote recovery process is complete, +the index following begins. + Now when you index documents into your leader index, you will see these documents replicated in the follower index. You can inspect the status of replication using the diff --git a/docs/reference/ccr/index.asciidoc b/docs/reference/ccr/index.asciidoc index b8131b4ae9f73..6b094b5837219 100644 --- a/docs/reference/ccr/index.asciidoc +++ b/docs/reference/ccr/index.asciidoc @@ -32,3 +32,4 @@ include::overview.asciidoc[] include::requirements.asciidoc[] include::auto-follow.asciidoc[] include::getting-started.asciidoc[] +include::remote-recovery.asciidoc[] diff --git a/docs/reference/ccr/remote-recovery.asciidoc b/docs/reference/ccr/remote-recovery.asciidoc new file mode 100644 index 0000000000000..fcf03cfc72814 --- /dev/null +++ b/docs/reference/ccr/remote-recovery.asciidoc @@ -0,0 +1,29 @@ +[role="xpack"] +[testenv="platinum"] +[[remote-recovery]] +== Remote recovery + +When you create a follower index, you cannot use it until it is fully initialized. +The _remote recovery_ process builds a new copy of a shard on a follower node by +copying data from the primary shard in the leader cluster. {es} uses this remote +recovery process to bootstrap a follower index using the data from the leader index. +This process provides the follower with a copy of the current state of the leader index, +even if a complete history of changes is not available on the leader due to Lucene +segment merging. + +Remote recovery is a network intensive process that transfers all of the Lucene +segment files from the leader cluster to the follower cluster. The follower +requests that a recovery session be initiated on the primary shard in the leader +cluster. The follower then requests file chunks concurrently from the leader. By +default, the process concurrently requests `5` large `1mb` file chunks. This default +behavior is designed to support leader and follower clusters with high network latency +between them. + +There are dynamic settings that you can use to rate-limit the transmitted data +and manage the resources consumed by remote recoveries. See +{ref}/ccr-settings.html[{ccr-cap} settings]. + +You can obtain information about an in-progress remote recovery by using the +{ref}/cat-recovery.html[recovery API] on the follower cluster. Remote recoveries +are implemented using the {ref}/modules-snapshots.html[snapshot and restore] infrastructure. This means that on-going remote recoveries are labelled as type +`snapshot` in the recovery API. 
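For example, a request along these lines against the follower cluster would show the file-chunk transfer progress for an in-flight remote recovery (the index name is a placeholder):

[source,js]
----
# Remote recoveries appear in the output with the snapshot recovery type,
# as noted above.
GET /follower-index/_recovery?human
----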
diff --git a/docs/reference/settings/ccr-settings.asciidoc b/docs/reference/settings/ccr-settings.asciidoc new file mode 100644 index 0000000000000..286bb421662ff --- /dev/null +++ b/docs/reference/settings/ccr-settings.asciidoc @@ -0,0 +1,52 @@ +[role="xpack"] +[[ccr-settings]] +=== {ccr-cap} settings + +These {ccr} settings can be dynamically updated on a live cluster with the +<>. + +[float] +[[ccr-recovery-settings]] +==== Remote recovery settings + +The following setting can be used to rate-limit the data transmitted during +{stack-ov}/remote-recovery.html[remote recoveries]: + +`ccr.indices.recovery.max_bytes_per_sec` (<>):: +Limits the total inbound and outbound remote recovery traffic on each node. +Since this limit applies on each node, but there may be many nodes performing +remote recoveries concurrently, the total amount of remote recovery bytes may be +much higher than this limit. If you set this limit too high then there is a risk +that ongoing remote recoveries will consume an excess of bandwidth (or other +resources) which could destabilize the cluster. This setting is used by both the +leader and follower clusters. For example if it is set to `20mb` on a leader, +the leader will only send `20mb/s` to the follower even if the follower is +requesting and can accept `60mb/s`. Defaults to `40mb`. + +[float] +[[ccr-advanced-recovery-settings]] +==== Advanced remote recovery settings + +The following _expert_ settings can be set to manage the resources consumed by +remote recoveries: + +`ccr.indices.recovery.max_concurrent_file_chunks` (<>):: +Controls the number of file chunk requests that can be sent in parallel per +recovery. As multiple remote recoveries might already running in parallel, +increasing this expert-level setting might only help in situations where remote +recovery of a single shard is not reaching the total inbound and outbound remote recovery traffic as configured by `ccr.indices.recovery.max_bytes_per_sec`. +Defaults to `5`. The maximum allowed value is `10`. + +`ccr.indices.recovery.chunk_size`(<>):: +Controls the chunk size requested by the follower during file transfer. Defaults to +`1mb`. + +`ccr.indices.recovery.recovery_activity_timeout`(<>):: +Controls the timeout for recovery activity. This timeout primarily applies on +the leader cluster. The leader cluster must open resources in-memory to supply +data to the follower during the recovery process. If the leader does not receive recovery requests from the follower for this period of time, it will close the resources. Defaults to 60 seconds. + +`ccr.indices.recovery.internal_action_timeout` (<>):: +Controls the timeout for individual network requests during the remote recovery +process. An individual action timing out can fail the recovery. Defaults to +60 seconds. 
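Since these are dynamic cluster settings, they can be changed on a live cluster without a restart. For instance, the `20mb` leader-side rate limit used as an example above could be applied like this:

[source,js]
----
PUT /_cluster/settings
{
  "persistent": {
    "ccr.indices.recovery.max_bytes_per_sec": "20mb"
  }
}
----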
diff --git a/docs/reference/settings/configuring-xes.asciidoc b/docs/reference/settings/configuring-xes.asciidoc index 29c6b95dddf0f..48401c1a03433 100644 --- a/docs/reference/settings/configuring-xes.asciidoc +++ b/docs/reference/settings/configuring-xes.asciidoc @@ -6,7 +6,8 @@ ++++ include::{asciidoc-dir}/../../shared/settings.asciidoc[] +include::ccr-settings.asciidoc[] include::license-settings.asciidoc[] include::ml-settings.asciidoc[] -include::notification-settings.asciidoc[] include::sql-settings.asciidoc[] +include::notification-settings.asciidoc[] From 85654b48ece2a26796e0d36d23ca2bd5b215f39f Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Tue, 5 Mar 2019 11:45:17 -0700 Subject: [PATCH 36/39] Use any index specified by .watches for Watcher (#39541) (#39706) * Use any index specified by .watches for Watcher (#39541) Previously, Watcher only attached its listener to indices that started with the prefix `.watches`, which causes Watcher to silently fail to schedule newly created Watches if the `.watches` alias is redirected to an index that does not start with `.watches`. Watcher now attaches the listener to all indices, so that Watcher can respond to changes in which index has the `.watches` alias. Also adjusts the tests to randomly use non-prefixed concrete indices for .watches and .triggered_watches. --- .../elasticsearch/xpack/watcher/Watcher.java | 8 +-- .../watcher/WatcherConcreteIndexTests.java | 54 ++++++++++++++++ .../AbstractWatcherIntegrationTestCase.java | 61 ++++++++++++++++--- 3 files changed, 111 insertions(+), 12 deletions(-) create mode 100644 x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 71b37d934ab56..4c2f0cc2c5b04 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -608,11 +608,9 @@ public void onIndexModule(IndexModule module) { } assert listener != null; - // for now, we only add this index operation listener to indices starting with .watches - // this also means, that aliases pointing to this index have to follow this notation - if (module.getIndex().getName().startsWith(Watch.INDEX)) { - module.addIndexOperationListener(listener); - } + // Attach a listener to every index so that we can react to alias changes. + // This listener will be a no-op except on the index pointed to by .watches + module.addIndexOperationListener(listener); } static void validAutoCreateIndex(Settings settings, Logger logger) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java new file mode 100644 index 0000000000000..87d4f2d1de783 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Locale; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.noneInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.greaterThan; + +public class WatcherConcreteIndexTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected boolean timeWarped() { + return false; + } + + public void testCanUseAnyConcreteIndexName() throws Exception { + String newWatcherIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + String watchResultsIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); + createIndex(watchResultsIndex); + + stopWatcher(); + replaceWatcherIndexWithRandomlyNamedIndex(Watch.INDEX, newWatcherIndexName, Watch.DOC_TYPE); + startWatcher(); + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("mywatch").setSource(watchBuilder() + .trigger(schedule(interval("3s"))) + .input(noneInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("indexer", indexAction(watchResultsIndex, "_doc"))) + .get(); + + assertTrue(putWatchResponse.isCreated()); + + assertBusy(() -> { + SearchResponse searchResult = client().prepareSearch(watchResultsIndex).setTrackTotalHits(true).get(); + assertThat((int) searchResult.getHits().getTotalHits(), greaterThan(0)); + }); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 4eb4bd1aa2c6e..ec035bb9066a0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -7,7 +7,9 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -15,6 +17,7 @@ import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.network.NetworkModule; @@ -192,7 +195,7 @@ public void _setup() throws Exception { 
internalCluster().setDisruptionScheme(ice); ice.startDisrupting(); } - + stopWatcher(); createWatcherIndicesOrAliases(); startWatcher(); } @@ -219,13 +222,19 @@ private void createWatcherIndicesOrAliases() throws Exception { // alias for .watches, setting the index template to the same as well String watchIndexName; String triggeredWatchIndexName; - if (rarely()) { - watchIndexName = ".watches-alias-index"; - CreateIndexResponse response = client().admin().indices().prepareCreate(watchIndexName) + if (randomBoolean()) { + // Create an index to get the template + String tempIndex = ".watches" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + CreateIndexResponse response = client().admin().indices().prepareCreate(tempIndex) .setCause("Index to test aliases with .watches index") .addAlias(new Alias(Watch.INDEX)) .get(); assertAcked(response); + + // Now replace it with a randomly named index + watchIndexName = randomAlphaOfLengthBetween(5,10).toLowerCase(Locale.ROOT); + replaceWatcherIndexWithRandomlyNamedIndex(Watch.INDEX, watchIndexName, Watch.DOC_TYPE); + logger.info("set alias for .watches index to [{}]", watchIndexName); } else { watchIndexName = Watch.INDEX; @@ -237,13 +246,19 @@ private void createWatcherIndicesOrAliases() throws Exception { } // alias for .triggered-watches, ensuring the index template is set appropriately - if (rarely()) { - triggeredWatchIndexName = ".triggered_watches-alias-index"; - CreateIndexResponse response = client().admin().indices().prepareCreate(triggeredWatchIndexName) + if (randomBoolean()) { + String tempIndex = ".triggered_watches-alias-index"; + CreateIndexResponse response = client().admin().indices().prepareCreate(tempIndex) .setCause("Index to test aliases with .triggered-watches index") .addAlias(new Alias(TriggeredWatchStoreField.INDEX_NAME)) .get(); assertAcked(response); + + // Now replace it with a randomly-named index + triggeredWatchIndexName = randomValueOtherThan(watchIndexName, + () -> randomAlphaOfLengthBetween(5,10).toLowerCase(Locale.ROOT)); + replaceWatcherIndexWithRandomlyNamedIndex(TriggeredWatchStoreField.INDEX_NAME, triggeredWatchIndexName, + TriggeredWatchStoreField.DOC_TYPE); logger.info("set alias for .triggered-watches index to [{}]", triggeredWatchIndexName); } else { triggeredWatchIndexName = TriggeredWatchStoreField.INDEX_NAME; @@ -257,6 +272,38 @@ private void createWatcherIndicesOrAliases() throws Exception { } } + public void replaceWatcherIndexWithRandomlyNamedIndex(String originalIndexOrAlias, String to, String docType) { + GetIndexResponse index = client().admin().indices().prepareGetIndex().setIndices(originalIndexOrAlias).get(); + MappingMetaData mapping = index.getMappings().get(index.getIndices()[0]).get(docType); + + Settings settings = index.getSettings().get(index.getIndices()[0]); + Settings.Builder newSettings = Settings.builder().put(settings); + newSettings.remove("index.provided_name"); + newSettings.remove("index.uuid"); + newSettings.remove("index.creation_date"); + newSettings.remove("index.version.created"); + + CreateIndexResponse createIndexResponse = client().admin().indices().prepareCreate(to) + .addMapping(docType, mapping.sourceAsMap()) + .setSettings(newSettings) + .get(); + assertTrue(createIndexResponse.isAcknowledged()); + ensureGreen(to); + + AtomicReference originalIndex = new AtomicReference<>(originalIndexOrAlias); + boolean watchesIsAlias = client().admin().indices().prepareAliasesExist(originalIndexOrAlias).get().isExists(); + if (watchesIsAlias) { + GetAliasesResponse 
aliasesResponse = client().admin().indices().prepareGetAliases(originalIndexOrAlias).get(); + assertEquals(1, aliasesResponse.getAliases().size()); + aliasesResponse.getAliases().forEach((aliasRecord) -> { + assertEquals(1, aliasRecord.value.size()); + originalIndex.set(aliasRecord.key); + }); + } + client().admin().indices().prepareDelete(originalIndex.get()).get(); + client().admin().indices().prepareAliases().addAlias(to, originalIndexOrAlias).get(); + } + protected TimeWarp timeWarp() { assert timeWarped() : "cannot access TimeWarp when test context is not time warped"; return timeWarp; From a7884895211431786c87788fea9278da7a528d23 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 5 Mar 2019 10:15:59 -0700 Subject: [PATCH 37/39] Add Docker build type (#39378) This commit adds a new build type (together with deb/rpm/tar/zip) to represent the official Docker images. This build type will be displayed in APIs such as the main and nodes info APIs. --- distribution/docker/src/docker/Dockerfile | 2 ++ server/src/main/java/org/elasticsearch/Build.java | 3 +++ 2 files changed, 5 insertions(+) diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index c202fa78668f5..5df3030faaa74 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -33,6 +33,8 @@ WORKDIR /usr/share/elasticsearch ${source_elasticsearch} RUN tar zxf /opt/${elasticsearch} --strip-components=1 +RUN grep ES_DISTRIBUTION_TYPE=tar /usr/share/elasticsearch/bin/elasticsearch-env \ + && sed -ie 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' /usr/share/elasticsearch/bin/elasticsearch-env RUN mkdir -p config data logs RUN chmod 0775 config data logs COPY config/elasticsearch.yml config/log4j2.properties config/ diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9021e2d4c1f66..6f33775023d96 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -75,6 +75,7 @@ public static Flavor fromDisplayName(final String displayName) { public enum Type { DEB("deb"), + DOCKER("docker"), RPM("rpm"), TAR("tar"), ZIP("zip"), @@ -94,6 +95,8 @@ public static Type fromDisplayName(final String displayName) { switch (displayName) { case "deb": return Type.DEB; + case "docker": + return Type.DOCKER; case "rpm": return Type.RPM; case "tar": From 98afed7d22db91e825c81e360319309ca2423825 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 5 Mar 2019 22:02:31 -0500 Subject: [PATCH 38/39] Rename retention lease setting (#39719) This commit renames the retention lease setting index.soft_deletes.retention.lease so that it is under the namespace index.soft_deletes.retention_lease. As such, we rename the setting to index.soft_deletes.retention_lease.period. 
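The renamed setting remains dynamic (note `Property.Dynamic` in the diff below), so after this change it can be adjusted per index under its new name. A sketch with a placeholder index name, using the 12-hour default from the diff as the value:

[source,js]
----
PUT /my-index/_settings
{
  "index.soft_deletes.retention_lease.period": "12h"
}
----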
--- .../common/settings/IndexScopedSettings.java | 2 +- .../main/java/org/elasticsearch/index/IndexSettings.java | 8 ++++---- .../seqno/ReplicationTrackerRetentionLeaseTests.java | 2 +- .../org/elasticsearch/index/seqno/RetentionLeaseIT.java | 4 ++-- .../index/shard/IndexShardRetentionLeaseTests.java | 4 ++-- .../xpack/ccr/action/TransportResumeFollowAction.java | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 906b8c87083b7..7cbaccc005958 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -133,7 +133,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_GC_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, - IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING, + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 27b9b1687db78..0b0117175d2c6 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -264,9 +264,9 @@ public final class IndexSettings { /** * Controls the maximum length of time since a retention lease is created or renewed before it is considered expired. 
*/ - public static final Setting INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING = + public static final Setting INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING = Setting.timeSetting( - "index.soft_deletes.retention.lease", + "index.soft_deletes.retention_lease.period", TimeValue.timeValueHours(12), TimeValue.ZERO, Property.Dynamic, @@ -480,7 +480,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); softDeleteEnabled = version.onOrAfter(Version.V_6_5_0) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); - retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING).millis(); + retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING).millis(); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING); @@ -550,7 +550,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled); - scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING, this::setRetentionLeaseMillis); + scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, this::setRetentionLeaseMillis); } private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index f8781d0e78d40..68ffa3f86e799 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -336,7 +336,7 @@ private void runExpirationTest(final boolean primaryMode) { final Settings settings = Settings .builder() .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), TimeValue.timeValueMillis(retentionLeaseMillis)) .build(); final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index b789f63daac63..979a5e168bd74 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -213,7 +213,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { .prepareUpdateSettings("index") .setSettings( Settings.builder() - .putNull(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey()) + .putNull(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey()) .build()) .get(); assertTrue(longTtlResponse.isAcknowledged()); @@ -243,7 +243,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { 
                 .prepareUpdateSettings("index")
                 .setSettings(
                         Settings.builder()
-                                .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), retentionLeaseTimeToLive)
+                                .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), retentionLeaseTimeToLive)
                                 .build())
                 .get();
         assertTrue(shortTtlResponse.isAcknowledged());
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java
index 566d1feaf007d..a12a89b282103 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java
@@ -144,7 +144,7 @@ private void runExpirationTest(final boolean primary) throws IOException {
         final Settings settings = Settings
                 .builder()
                 .put(
-                        IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(),
+                        IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(),
                         TimeValue.timeValueMillis(retentionLeaseMillis))
                 .build();
         // current time is mocked through the thread pool
@@ -211,7 +211,7 @@ private void runExpirationTest(final boolean primary) throws IOException {
     public void testPersistence() throws IOException {
         final Settings settings = Settings.builder()
                 .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
-                .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS)
+                .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS)
                 .build();
         final IndexShard indexShard = newStartedShard(
                 true,
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java
index 3545bb52942db..607474a3fc3c3 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java
@@ -387,7 +387,7 @@ static String[] extractLeaderShardHistoryUUIDs(Map<String, String> ccrIndexMetaD
         nonReplicatedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD);
         nonReplicatedSettings.add(IndexSettings.ALLOW_UNMAPPED);
         nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING);
-        nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING);
+        nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING);
         nonReplicatedSettings.add(IndexSettings.MAX_SCRIPT_FIELDS_SETTING);
         nonReplicatedSettings.add(IndexSettings.MAX_REGEX_LENGTH_SETTING);
         nonReplicatedSettings.add(IndexSettings.MAX_TERMS_COUNT_SETTING);

From 8acf59bdc260d0b349d188f2dd0bcd342564ab8c Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 5 Mar 2019 22:19:19 -0500
Subject: [PATCH 39/39] Remove beta label from CCR (#39722)

This commit removes the beta label from CCR.
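For the setting rename in the preceding patch, the new key behaves like any other
dynamic index setting. A minimal Java sketch in the style of the updated tests
(the builder calls, the setting constant, and the 12-hour default all appear in the
diff above; the standalone snippet itself is illustrative rather than part of the
patch):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.index.IndexSettings;

    // Resolves to "index.soft_deletes.retention_lease.period"; the setting is
    // dynamic and defaults to 12 hours, so this simply restates the default.
    final Settings settings = Settings.builder()
            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(),
                    TimeValue.timeValueHours(12))
            .build();

Because the setting is declared with Property.Dynamic, it can also be changed on a
live index, which is what RetentionLeaseIT does above through prepareUpdateSettings.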
---
 .../apis/auto-follow/delete-auto-follow-pattern.asciidoc | 2 --
 .../ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc | 2 --
 .../ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc | 2 --
 docs/reference/ccr/apis/ccr-apis.asciidoc | 2 --
 docs/reference/ccr/apis/follow/get-follow-info.asciidoc | 2 --
 docs/reference/ccr/apis/follow/get-follow-stats.asciidoc | 2 --
 docs/reference/ccr/apis/follow/post-pause-follow.asciidoc | 2 --
 docs/reference/ccr/apis/follow/post-resume-follow.asciidoc | 2 --
 docs/reference/ccr/apis/follow/post-unfollow.asciidoc | 2 --
 docs/reference/ccr/apis/follow/put-follow.asciidoc | 2 --
 docs/reference/ccr/apis/get-ccr-stats.asciidoc | 2 --
 docs/reference/ccr/auto-follow.asciidoc | 2 --
 docs/reference/ccr/getting-started.asciidoc | 2 --
 docs/reference/ccr/index.asciidoc | 7 +------
 docs/reference/ccr/overview.asciidoc | 2 --
 docs/reference/ccr/requirements.asciidoc | 2 --
 16 files changed, 1 insertion(+), 36 deletions(-)

diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
index f64fb7e91d665..e2e91334402f7 100644
--- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Delete auto-follow pattern</titleabbrev>
 ++++

-beta[]
-
 Delete auto-follow patterns.

 ==== Description
diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
index 3db92ce6222b0..9eb18b0aa00b9 100644
--- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Get auto-follow pattern</titleabbrev>
 ++++

-beta[]
-
 Get auto-follow patterns.

 ==== Description
diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
index f1a4a974602cb..3ed6cd947028e 100644
--- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Create auto-follow pattern</titleabbrev>
 ++++

-beta[]
-
 Creates an auto-follow pattern.

 ==== Description
diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc
index 2009742c8322b..c7c5194790360 100644
--- a/docs/reference/ccr/apis/ccr-apis.asciidoc
+++ b/docs/reference/ccr/apis/ccr-apis.asciidoc
@@ -3,8 +3,6 @@
 [[ccr-apis]]
 == Cross-cluster replication APIs

-beta[]
-
 You can use the following APIs to perform {ccr} operations.

 [float]
diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc
index 212b1167b6e33..eca2f5e8e98f9 100644
--- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc
+++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Get follower info</titleabbrev>
 ++++

-beta[]
-
 Retrieves information about all follower indices.

 ==== Description
diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
index 8c02582e01278..73bdd9494d1c8 100644
--- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
+++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Get follower stats</titleabbrev>
 ++++

-beta[]
-
 Get follower stats.

 ==== Description
diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
index f5b0bef7b2994..60de85cabdcbd 100644
--- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Pause follower</titleabbrev>
 ++++

-beta[]
-
 Pauses a follower index.

 ==== Description
diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
index 736061f2bfde8..279f4139cdddf 100644
--- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Resume follower</titleabbrev>
 ++++

-beta[]
-
 Resumes a follower index.

 ==== Description
diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
index c3126d02d1efc..236d2723a94dc 100644
--- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Unfollow</titleabbrev>
 ++++

-beta[]
-
 Converts a follower index to a regular index.

 ==== Description
diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc
index 52253d6ad2f4c..8098fcff1cd53 100644
--- a/docs/reference/ccr/apis/follow/put-follow.asciidoc
+++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Create follower</titleabbrev>
 ++++

-beta[]
-
 Creates a follower index.

 ==== Description
diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc
index 8949de8787fa7..5f1844f167bf7 100644
--- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc
+++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc
@@ -6,8 +6,6 @@
 <titleabbrev>Get CCR stats</titleabbrev>
 ++++

-beta[]
-
 Get {ccr} stats.

 ==== Description
diff --git a/docs/reference/ccr/auto-follow.asciidoc b/docs/reference/ccr/auto-follow.asciidoc
index a7f4b95f42202..580b2b11d7244 100644
--- a/docs/reference/ccr/auto-follow.asciidoc
+++ b/docs/reference/ccr/auto-follow.asciidoc
@@ -3,8 +3,6 @@
 [[ccr-auto-follow]]
 === Automatically following indices

-beta[]
-
 In time series use cases where you want to follow new indices that are
 periodically created (such as daily Beats indices), manually configuring follower
 indices for each new leader index can be an operational burden. The auto-follow
diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc
index e0fd04ecf1731..db1b9f2e8c279 100644
--- a/docs/reference/ccr/getting-started.asciidoc
+++ b/docs/reference/ccr/getting-started.asciidoc
@@ -3,8 +3,6 @@
 [[ccr-getting-started]]
 == Getting started with {ccr}

-beta[]
-
 This getting-started guide for {ccr} shows you how to:

 * <