From 95ce232bd1a6d5c6e8cb9b8219eceb6a76920635 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 19 Jun 2018 11:15:50 -0400 Subject: [PATCH 01/17] Docs: Advice for reindexing many indices (#31279) Folks tend to want to be able to make a single `_reindex` call to migrate many indices. You *can* do that and we even have an example of how to do that in the docs, but it isn't always a good idea. This change adds some advice to the docs: generally you want to make one reindex call per index. Closes #22920 --- docs/reference/docs/reindex.asciidoc | 31 ++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index bdbffb0a08d5d..c04bbd6813795 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1028,11 +1028,38 @@ number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. +[float] +=== Reindexing many indices +If you have many indices to reindex, it is generally better to reindex them +one at a time rather than using a glob pattern to pick up many indices. That +way you can resume the process if there are any errors by removing the +partially completed index and starting over at that index. It also makes +parallelizing the process fairly simple: split the list of indices to reindex +and run each list in parallel. + +One-off bash scripts seem to work nicely for this: + +[source,bash] +---------------------------------------------------------------- +for index in i1 i2 i3 i4 i5; do + curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ + "source": { + "index": "'$index'" + }, + "dest": { + "index": "'$index'-reindexed" + } + }' +done +---------------------------------------------------------------- +// NOTCONSOLE  [float] === Reindex daily indices -You can use `_reindex` in combination with <> -to reindex daily indices to apply a new template to the existing documents. +Notwithstanding the above advice, you can use `_reindex` in combination with +<> to reindex daily indices to apply +a new template to the existing documents. 
Assuming you have indices consisting of documents as follows: From 002b74fe5bbb3588b1c9c823b107e8724841e0aa Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 19 Jun 2018 08:49:32 -0700 Subject: [PATCH 02/17] [DOCS] Add code snippet testing for more ML APIs (#31404) --- x-pack/docs/build.gradle | 3 --- x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc | 3 ++- x-pack/docs/en/rest-api/ml/validate-detector.asciidoc | 3 +-- x-pack/docs/en/rest-api/ml/validate-job.asciidoc | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 371a8ce4acacf..6035e75ec1638 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -57,7 +57,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/license/delete-license.asciidoc', 'en/rest-api/license/update-license.asciidoc', 'en/ml/api-quickref.asciidoc', - 'en/rest-api/ml/delete-calendar-event.asciidoc', 'en/rest-api/ml/delete-snapshot.asciidoc', 'en/rest-api/ml/forecast.asciidoc', 'en/rest-api/ml/get-bucket.asciidoc', @@ -71,8 +70,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/preview-datafeed.asciidoc', 'en/rest-api/ml/revert-snapshot.asciidoc', 'en/rest-api/ml/update-snapshot.asciidoc', - 'en/rest-api/ml/validate-detector.asciidoc', - 'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/watcher/stats.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc b/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc index 73458f3179197..ef8dad39dba70 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc +++ b/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc @@ -44,7 +44,7 @@ calendar: DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st -------------------------------------------------- // CONSOLE -// TEST[skip:automatically-generated ID] +// TEST[catch:missing] When the event is removed, you receive the following results: [source,js] @@ -53,3 +53,4 @@ When the event is removed, you receive the following results: "acknowledged": true } ---- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc b/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc index f688ef91cfe53..ab8a0de442cf8 100644 --- a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc +++ b/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc @@ -28,7 +28,6 @@ see <>. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. ==== Examples @@ -45,7 +44,6 @@ POST _xpack/ml/anomaly_detectors/_validate/detector } -------------------------------------------------- // CONSOLE -// TEST[skip:todo] When the validation completes, you receive the following results: [source,js] @@ -54,3 +52,4 @@ When the validation completes, you receive the following results: "acknowledged": true } ---- +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc b/x-pack/docs/en/rest-api/ml/validate-job.asciidoc index 61d0c70514e8d..0ccc5bc04e1d1 100644 --- a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc +++ b/x-pack/docs/en/rest-api/ml/validate-job.asciidoc @@ -28,7 +28,6 @@ see <>. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. 
==== Examples @@ -56,7 +55,6 @@ POST _xpack/ml/anomaly_detectors/_validate } -------------------------------------------------- // CONSOLE -// TEST[skip:todo] When the validation is complete, you receive the following results: [source,js] @@ -65,3 +63,4 @@ When the validation is complete, you receive the following results: "acknowledged": true } ---- +// TESTRESPONSE \ No newline at end of file From 317007b8cdede27bda45024e2521409054f90439 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 19 Jun 2018 19:03:31 +0200 Subject: [PATCH 03/17] Preserve response headers on cluster update task (#31421) #31241 changed the cluster state update tasks to run under system context. The context wrapping did not preserve response headers, though. This has led to a test failure on 6.x #31408, as the deprecation warnings were not carried back anymore to the caller when creating an index. This commit changes the restorable context supplier to preserve response headers. --- .../elasticsearch/cluster/service/MasterService.java | 2 +- .../cluster/service/MasterServiceTests.java | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 2543be4811c1e..8927adfd43458 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -730,7 +730,7 @@ public void submitStateUpdateTasks(final String source, return; } final ThreadContext threadContext = threadPool.getThreadContext(); - final Supplier supplier = threadContext.newRestorableContext(false); + final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { threadContext.markAsSystemContext(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 20587d31f5359..1ef548bd68114 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -54,6 +54,7 @@ import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -177,6 +178,8 @@ public void testThreadContext() throws InterruptedException { try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { final Map expectedHeaders = Collections.singletonMap("test", "test"); + final Map> expectedResponseHeaders = Collections.singletonMap("testResponse", + Arrays.asList("testResponse")); threadPool.getThreadContext().putHeader(expectedHeaders); final TimeValue ackTimeout = randomBoolean() ? 
TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); @@ -187,6 +190,8 @@ public void testThreadContext() throws InterruptedException { public ClusterState execute(ClusterState currentState) { assertTrue(threadPool.getThreadContext().isSystemContext()); assertEquals(Collections.emptyMap(), threadPool.getThreadContext().getHeaders()); + threadPool.getThreadContext().addResponseHeader("testResponse", "testResponse"); + assertEquals(expectedResponseHeaders, threadPool.getThreadContext().getResponseHeaders()); if (randomBoolean()) { return ClusterState.builder(currentState).build(); @@ -201,6 +206,7 @@ public ClusterState execute(ClusterState currentState) { public void onFailure(String source, Exception e) { assertFalse(threadPool.getThreadContext().isSystemContext()); assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + assertEquals(expectedResponseHeaders, threadPool.getThreadContext().getResponseHeaders()); latch.countDown(); } @@ -208,6 +214,7 @@ public void onFailure(String source, Exception e) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { assertFalse(threadPool.getThreadContext().isSystemContext()); assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + assertEquals(expectedResponseHeaders, threadPool.getThreadContext().getResponseHeaders()); latch.countDown(); } @@ -229,6 +236,7 @@ public TimeValue timeout() { public void onAllNodesAcked(@Nullable Exception e) { assertFalse(threadPool.getThreadContext().isSystemContext()); assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + assertEquals(expectedResponseHeaders, threadPool.getThreadContext().getResponseHeaders()); latch.countDown(); } @@ -236,6 +244,7 @@ public void onAllNodesAcked(@Nullable Exception e) { public void onAckTimeout() { assertFalse(threadPool.getThreadContext().isSystemContext()); assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + assertEquals(expectedResponseHeaders, threadPool.getThreadContext().getResponseHeaders()); latch.countDown(); } @@ -243,6 +252,7 @@ public void onAckTimeout() { assertFalse(threadPool.getThreadContext().isSystemContext()); assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + assertEquals(Collections.emptyMap(), threadPool.getThreadContext().getResponseHeaders()); } latch.await(); From 6a9db43261c66e5aa3b7c1baac95e5bca13a60d3 Mon Sep 17 00:00:00 2001 From: Christoph Büscher Date: Tue, 19 Jun 2018 19:15:33 +0200 Subject: [PATCH 04/17] Revert "Increasing skip version for failing test on 6.x" Since the failing tests are fixed, this commit re-enables running the integration tests on versions 6.2 and above --- .../rest-api-spec/test/analysis-common/40_token_filters.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index f0fe03b8f4f75..bfb6c97c24f6d 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -1028,8 +1028,8 @@ --- "delimited_payload_filter": - skip: - version: " - 6.99.99" - reason: AwaitsFix, https://github.com/elastic/elasticsearch/issues/31422. 
delimited_payload_filter deprecated in 6.2, replaced by delimited_payload + version: " - 6.1.99" + reason: delimited_payload_filter deprecated in 6.2, replaced by delimited_payload features: "warnings" - do: From b8dc7de4d851ac44ca13f07a53deede51153207f Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 19 Jun 2018 10:33:57 -0700 Subject: [PATCH 05/17] [DOCS] Moves the info API to docs (#31121) --- docs/reference/rest-api/index.asciidoc | 2 +- .../reference}/rest-api/info.asciidoc | 32 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) rename {x-pack/docs/en => docs/reference}/rest-api/info.asciidoc (84%) diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 8c58246a0a658..b9d3c9db60a6f 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -18,7 +18,7 @@ directly to configure and access {xpack} features. -- -include::{xes-repo-dir}/rest-api/info.asciidoc[] +include::info.asciidoc[] include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] include::{xes-repo-dir}/rest-api/licensing.asciidoc[] include::{xes-repo-dir}/rest-api/migration.asciidoc[] diff --git a/x-pack/docs/en/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc similarity index 84% rename from x-pack/docs/en/rest-api/info.asciidoc rename to docs/reference/rest-api/info.asciidoc index ccb979124f2da..1cf4ab563b185 100644 --- a/x-pack/docs/en/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -1,8 +1,9 @@ [role="xpack"] +[testenv="basic"] [[info-api]] == Info API -The info API provides general information about the installed {xpack}. +The info API provides general information about the installed {xpack} features. [float] === Request @@ -55,30 +56,29 @@ Example response: "date" : "2015-04-07T13:34:42Z" }, "license" : { - "uid" : "893361dc-9749-4997-93cb-802e3dofh7aa", - "type" : "trial", - "mode" : "trial", - "status" : "active", - "expiry_date_in_millis" : 1914278399999 + "uid" : "893361dc-9749-4997-93cb-xxx", + "type" : "basic", + "mode" : "basic", + "status" : "active" }, "features" : { "graph" : { "description" : "Graph Data Exploration for the Elastic Stack", - "available" : true, + "available" : false, "enabled" : true }, "logstash" : { "description" : "Logstash management component for X-Pack", - "available" : true, + "available" : false, "enabled" : true }, "ml" : { "description" : "Machine Learning for the Elastic Stack", - "available" : true, + "available" : false, "enabled" : true, "native_code_info" : { - "version" : "6.0.0-alpha1-SNAPSHOT", - "build_hash" : "d081461967d61a" + "version" : "7.0.0-alpha1-SNAPSHOT", + "build_hash" : "99a07c016d5a73" } }, "monitoring" : { @@ -93,12 +93,12 @@ Example response: }, "security" : { "description" : "Security for the Elastic Stack", - "available" : true, + "available" : false, "enabled" : true }, "watcher" : { "description" : "Alerting, Notification and Automation for the Elastic Stack", - "available" : true, + "available" : false, "enabled" : true } }, @@ -107,10 +107,10 @@ Example response: ------------------------------------------------------------ // TESTRESPONSE[s/"hash" : "2798b1a3ce779b3611bb53a0082d4d741e4d3168",/"hash" : "$body.build.hash",/] // TESTRESPONSE[s/"date" : "2015-04-07T13:34:42Z"/"date" : "$body.build.date"/] -// TESTRESPONSE[s/"uid" : "893361dc-9749-4997-93cb-802e3dofh7aa",/"uid": "$body.license.uid",/] +// TESTRESPONSE[s/"uid" : "893361dc-9749-4997-93cb-xxx",/"uid": "$body.license.uid",/] // 
TESTRESPONSE[s/"expiry_date_in_millis" : 1914278399999/"expiry_date_in_millis" : "$body.license.expiry_date_in_millis"/] -// TESTRESPONSE[s/"version" : "6.0.0-alpha1-SNAPSHOT",/"version": "$body.features.ml.native_code_info.version",/] -// TESTRESPONSE[s/"build_hash" : "d081461967d61a"/"build_hash": "$body.features.ml.native_code_info.build_hash"/] +// TESTRESPONSE[s/"version" : "7.0.0-alpha1-SNAPSHOT",/"version": "$body.features.ml.native_code_info.version",/] +// TESTRESPONSE[s/"build_hash" : "99a07c016d5a73"/"build_hash": "$body.features.ml.native_code_info.build_hash"/] // So much s/// but at least we test that the layout is close to matching.... The following example only returns the build and features information: From 29397e78ce777943f2f1c1c0791174ee838bf40b Mon Sep 17 00:00:00 2001 From: lcawl Date: Tue, 19 Jun 2018 11:41:02 -0700 Subject: [PATCH 06/17] [DOCS] Updated version in Info API example --- docs/reference/rest-api/info.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 1cf4ab563b185..40ce20ff0a9e5 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -77,8 +77,8 @@ Example response: "available" : false, "enabled" : true, "native_code_info" : { - "version" : "7.0.0-alpha1-SNAPSHOT", - "build_hash" : "99a07c016d5a73" + "version" : "6.4.0-SNAPSHOT", + "build_hash" : "3d394c6afe2fc5" } }, "monitoring" : { @@ -109,8 +109,8 @@ Example response: // TESTRESPONSE[s/"date" : "2015-04-07T13:34:42Z"/"date" : "$body.build.date"/] // TESTRESPONSE[s/"uid" : "893361dc-9749-4997-93cb-xxx",/"uid": "$body.license.uid",/] // TESTRESPONSE[s/"expiry_date_in_millis" : 1914278399999/"expiry_date_in_millis" : "$body.license.expiry_date_in_millis"/] -// TESTRESPONSE[s/"version" : "7.0.0-alpha1-SNAPSHOT",/"version": "$body.features.ml.native_code_info.version",/] -// TESTRESPONSE[s/"build_hash" : "99a07c016d5a73"/"build_hash": "$body.features.ml.native_code_info.build_hash"/] +// TESTRESPONSE[s/"version" : "6.4.0-SNAPSHOT",/"version": "$body.features.ml.native_code_info.version",/] +// TESTRESPONSE[s/"build_hash" : "3d394c6afe2fc5"/"build_hash": "$body.features.ml.native_code_info.build_hash"/] // So much s/// but at least we test that the layout is close to matching.... The following example only returns the build and features information: From 769da43a8ba9206aa4c8a91fb6ccc8c159c4c9fb Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Tue, 19 Jun 2018 11:58:34 -0600 Subject: [PATCH 07/17] Security: fix joining cluster with production license (#31341) The changes made to disable security for trial licenses unless security is explicitly enabled caused issues when a 6.3 node attempts to join a cluster that already has a production license installed. The new node starts off with a trial license and `xpack.security.enabled` is not set for the node, which causes the security code to skip attaching the user to the request. The existing cluster has security enabled and the lack of a user attached to the requests causes the request to be rejected. This commit changes the security code to check if the state has been recovered yet when making the decision on whether or not to attach a user. If the state has not yet been recovered, the code will attach the user to the request in case security is enabled on the cluster being joined. 
Closes #31332 --- .../license/XPackLicenseState.java | 6 +- .../license/XPackLicenseStateTests.java | 10 +++ .../xpack/security/Security.java | 4 +- .../SecurityServerTransportInterceptor.java | 33 +++++--- .../elasticsearch/license/LicensingTests.java | 40 ++++++++++ ...curityServerTransportInterceptorTests.java | 78 ++++++++++++++++--- 6 files changed, 149 insertions(+), 22 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 2e4caff1a725d..e58c5eda06316 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -254,7 +254,11 @@ private static class Status { public XPackLicenseState(Settings settings) { this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - this.isSecurityExplicitlyEnabled = settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) && isSecurityEnabled; + // 6.0+ requires TLS for production licenses, so if TLS is enabled and security is enabled + // we can interpret this as an explicit enabling of security if the security enabled + // setting is not explicitly set + this.isSecurityExplicitlyEnabled = isSecurityEnabled && + (settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) || XPackSettings.TRANSPORT_SSL_ENABLED.get(settings)); } /** Updates the current state of the license, which will change what features are available. */ diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 335932df770e8..f1503919570e6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -79,6 +79,16 @@ public void testSecurityDefaults() { assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + licenseState = + new XPackLicenseState(Settings.builder().put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), true).build()); + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + licenseState = new XPackLicenseState(Settings.EMPTY); assertThat(licenseState.isAuthAllowed(), is(true)); assertThat(licenseState.isIpFilteringAllowed(), is(true)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index cb714afc30c44..3f92f75716c1c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -488,8 +488,8 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste ipFilter.set(new IPFilter(settings, auditTrailService, clusterService.getClusterSettings(), 
getLicenseState())); components.add(ipFilter.get()); DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings()); - securityInterceptor.set(new SecurityServerTransportInterceptor(settings, threadPool, authcService.get(), authzService, getLicenseState(), - getSslService(), securityContext.get(), destructiveOperations)); + securityInterceptor.set(new SecurityServerTransportInterceptor(settings, threadPool, authcService.get(), + authzService, getLicenseState(), getSslService(), securityContext.get(), destructiveOperations, clusterService)); final Set requestInterceptors; if (XPackSettings.DLS_FLS_ENABLED.get(settings)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 785425ade9bd6..7de3e5d0980d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -9,12 +9,14 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -72,6 +74,8 @@ public class SecurityServerTransportInterceptor extends AbstractComponent implem private final SecurityContext securityContext; private final boolean reservedRealmEnabled; + private volatile boolean isStateNotRecovered = true; + public SecurityServerTransportInterceptor(Settings settings, ThreadPool threadPool, AuthenticationService authcService, @@ -79,7 +83,8 @@ public SecurityServerTransportInterceptor(Settings settings, XPackLicenseState licenseState, SSLService sslService, SecurityContext securityContext, - DestructiveOperations destructiveOperations) { + DestructiveOperations destructiveOperations, + ClusterService clusterService) { super(settings); this.settings = settings; this.threadPool = threadPool; @@ -90,6 +95,7 @@ public SecurityServerTransportInterceptor(Settings settings, this.securityContext = securityContext; this.profileFilters = initializeProfileFilters(destructiveOperations); this.reservedRealmEnabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); + clusterService.addListener(e -> isStateNotRecovered = e.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); } @Override @@ -98,7 +104,13 @@ public AsyncSender interceptSender(AsyncSender sender) { @Override public void sendRequest(Transport.Connection connection, String action, TransportRequest request, TransportRequestOptions options, TransportResponseHandler handler) { - if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + // make a local copy of isStateNotRecovered as this is a volatile variable and 
it + // is used multiple times in the method. The copy to a local variable allows us to + // guarantee we use the same value wherever we would check the value for the state + // being recovered + final boolean stateNotRecovered = isStateNotRecovered; + final boolean sendWithAuth = (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) || stateNotRecovered; + if (sendWithAuth) { // the transport in core normally does this check, BUT since we are serializing to a string header we need to do it // ourselves otherwise we wind up using a version newer than what we can actually send final Version minVersion = Version.min(connection.getVersion(), Version.CURRENT); @@ -108,20 +120,20 @@ public void sendRequest(Transport.Connection conne if (AuthorizationUtils.shouldReplaceUserWithSystem(threadPool.getThreadContext(), action)) { securityContext.executeAsUser(SystemUser.INSTANCE, (original) -> sendWithUser(connection, action, request, options, new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original) - , handler), sender), minVersion); + , handler), sender, stateNotRecovered), minVersion); } else if (AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadPool.getThreadContext())) { AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadPool.getThreadContext(), securityContext, (original) -> sendWithUser(connection, action, request, options, new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original) - , handler), sender)); + , handler), sender, stateNotRecovered)); } else if (securityContext.getAuthentication() != null && securityContext.getAuthentication().getVersion().equals(minVersion) == false) { // re-write the authentication since we want the authentication version to match the version of the connection securityContext.executeAfterRewritingAuthentication(original -> sendWithUser(connection, action, request, options, - new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original), handler), sender), - minVersion); + new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original), handler), sender, + stateNotRecovered), minVersion); } else { - sendWithUser(connection, action, request, options, handler, sender); + sendWithUser(connection, action, request, options, handler, sender, stateNotRecovered); } } else { sender.sendRequest(connection, action, request, options, handler); @@ -132,9 +144,10 @@ public void sendRequest(Transport.Connection conne private void sendWithUser(Transport.Connection connection, String action, TransportRequest request, TransportRequestOptions options, TransportResponseHandler handler, - AsyncSender sender) { - // There cannot be a request outgoing from this node that is not associated with a user. 
- if (securityContext.getAuthentication() == null) { + AsyncSender sender, final boolean stateNotRecovered) { + // There cannot be a request outgoing from this node that is not associated with a user + // unless we do not know the actual license of the cluster + if (securityContext.getAuthentication() == null && stateNotRecovered == false) { // we use an assertion here to ensure we catch this in our testing infrastructure, but leave the ISE for cases we do not catch // in tests and may be hit by a user assertNoAuthentication(action); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index c92b311faae85..7e91c1a91c0ef 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -24,11 +24,15 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.transport.Transport; @@ -42,7 +46,10 @@ import org.junit.After; import org.junit.Before; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -108,6 +115,7 @@ protected String configUsersRoles() { public Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)) .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false) .build(); } @@ -118,6 +126,11 @@ protected Collection> nodePlugins() { return plugins; } + @Override + protected int maxNumberOfNodes() { + return super.maxNumberOfNodes() + 1; + } + @Before public void resetLicensing() { enableLicensing(); @@ -253,6 +266,33 @@ public void testTransportClientAuthenticationByLicenseType() throws Exception { } } + public void testNodeJoinWithoutSecurityExplicitlyEnabled() throws Exception { + License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.PLATINUM, License.OperationMode.STANDARD); + enableLicensing(mode); + ensureGreen(); + + Path home = createTempDir(); + Path conf = home.resolve("config"); + Files.createDirectories(conf); + Settings nodeSettings = Settings.builder() + .put(nodeSettings(maxNumberOfNodes() - 1).filter(s -> "xpack.security.enabled".equals(s) == false)) + .put("node.name", "my-test-node") + .put("network.host", "localhost") + .put("cluster.name", internalCluster().getClusterName()) + .put("discovery.zen.minimum_master_nodes", + internalCluster().getInstance(Settings.class).get("discovery.zen.minimum_master_nodes")) + .put("path.home", home) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false) + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "test-zen") 
+ .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "test-zen") + .build(); + Collection> mockPlugins = Arrays.asList(LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class); + try (Node node = new MockNode(nodeSettings, mockPlugins)) { + node.start(); + ensureStableCluster(cluster().size() + 1); + } + } + private static void assertElasticsearchSecurityException(ThrowingRunnable runnable) { ElasticsearchSecurityException ee = expectThrows(ElasticsearchSecurityException.class, runnable); assertThat(ee.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.SECURITY)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 0bc7c527df346..dd7dda48ae813 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -7,11 +7,17 @@ import org.elasticsearch.Version; import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.Transport.Connection; @@ -31,6 +37,7 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.junit.After; import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; @@ -54,25 +61,33 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { private ThreadContext threadContext; private XPackLicenseState xPackLicenseState; private SecurityContext securityContext; + private ClusterService clusterService; @Override public void setUp() throws Exception { super.setUp(); settings = Settings.builder().put("path.home", createTempDir()).build(); - threadPool = mock(ThreadPool.class); - threadContext = new ThreadContext(settings); - when(threadPool.getThreadContext()).thenReturn(threadContext); + threadPool = new TestThreadPool(getTestName()); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + threadContext = threadPool.getThreadContext(); securityContext = spy(new SecurityContext(settings, threadPool.getThreadContext())); xPackLicenseState = mock(XPackLicenseState.class); when(xPackLicenseState.isAuthAllowed()).thenReturn(true); when(xPackLicenseState.isSecurityEnabled()).thenReturn(true); } + @After + public void stopThreadPool() throws Exception { + clusterService.close(); + terminate(threadPool); + } + public void testSendAsyncUnlicensed() { SecurityServerTransportInterceptor interceptor = new 
SecurityServerTransportInterceptor(settings, threadPool, mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); + ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener when(xPackLicenseState.isAuthAllowed()).thenReturn(false); AtomicBoolean calledWrappedSender = new AtomicBoolean(false); AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -92,6 +107,46 @@ public void sendRequest(Transport.Connection conne verifyZeroInteractions(securityContext); } + public void testSendAsyncWithStateNotRecovered() { + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); + final boolean securityEnabled = randomBoolean(); + final boolean authAllowed = securityEnabled && randomBoolean(); + when(xPackLicenseState.isAuthAllowed()).thenReturn(authAllowed); + when(xPackLicenseState.isSecurityEnabled()).thenReturn(securityEnabled); + ClusterState notRecovered = ClusterState.builder(clusterService.state()) + .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build()) + .build(); + ClusterServiceUtils.setState(clusterService, notRecovered); + assertTrue(clusterService.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); + + AtomicBoolean calledWrappedSender = new AtomicBoolean(false); + AtomicReference sendingUser = new AtomicReference<>(); + AsyncSender sender = interceptor.interceptSender(new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (calledWrappedSender.compareAndSet(false, true) == false) { + fail("sender called more than once!"); + } + sendingUser.set(securityContext.getUser()); + } + }); + Connection connection = mock(Connection.class); + when(connection.getVersion()).thenReturn(Version.CURRENT); + sender.sendRequest(connection, "internal:foo", null, null, null); + assertTrue(calledWrappedSender.get()); + assertEquals(SystemUser.INSTANCE, sendingUser.get()); + verify(xPackLicenseState).isSecurityEnabled(); + if (securityEnabled) { + verify(xPackLicenseState).isAuthAllowed(); + } + verify(securityContext).executeAsUser(any(User.class), any(Consumer.class), eq(Version.CURRENT)); + verifyNoMoreInteractions(xPackLicenseState); + } + public void testSendAsync() throws Exception { final User authUser = randomBoolean() ? 
new User("authenticator") : null; final User user = new User("test", randomRoles(), authUser); @@ -100,7 +155,8 @@ public void testSendAsync() throws Exception { SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); + ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener AtomicBoolean calledWrappedSender = new AtomicBoolean(false); AtomicReference sendingUser = new AtomicReference<>(); @@ -136,7 +192,8 @@ public void testSendAsyncSwitchToSystem() throws Exception { SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); + ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener AtomicBoolean calledWrappedSender = new AtomicBoolean(false); AtomicReference sendingUser = new AtomicReference<>(); @@ -167,11 +224,12 @@ public void testSendWithoutUser() throws Exception { SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))) { + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService) { @Override void assertNoAuthentication(String action) { } }; + ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener assertNull(securityContext.getUser()); AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -203,7 +261,8 @@ public void testSendToNewerVersionSetsCorrectVersion() throws Exception { SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); + ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener AtomicBoolean calledWrappedSender = new AtomicBoolean(false); AtomicReference sendingUser = new AtomicReference<>(); @@ -243,7 +302,8 @@ public void testSendToOlderVersionSetsCorrectVersion() throws Exception { SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, mock(AuthenticationService.class), 
mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); + ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener AtomicBoolean calledWrappedSender = new AtomicBoolean(false); AtomicReference sendingUser = new AtomicReference<>(); From f31366895a4b1df877f084360df31acc6bcfb97f Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 19 Jun 2018 13:57:10 -0700 Subject: [PATCH 08/17] [DOCS] Fixes code snippet testing for machine learning (#31189) --- x-pack/docs/build.gradle | 8 -- x-pack/docs/en/ml/aggregations.asciidoc | 3 +- x-pack/docs/en/ml/api-quickref.asciidoc | 4 +- x-pack/docs/en/ml/categories.asciidoc | 5 +- x-pack/docs/en/ml/configuring.asciidoc | 3 +- x-pack/docs/en/ml/customurl.asciidoc | 2 +- x-pack/docs/en/ml/functions.asciidoc | 3 +- x-pack/docs/en/ml/functions/count.asciidoc | 119 ++++++++++++++++---- x-pack/docs/en/ml/functions/geo.asciidoc | 30 ++++- x-pack/docs/en/ml/functions/info.asciidoc | 3 + x-pack/docs/en/ml/functions/metric.asciidoc | 20 +++- x-pack/docs/en/ml/functions/rare.asciidoc | 6 +- x-pack/docs/en/ml/functions/sum.asciidoc | 18 ++- x-pack/docs/en/ml/functions/time.asciidoc | 5 +- x-pack/docs/en/ml/populations.asciidoc | 3 +- x-pack/docs/en/ml/stopping-ml.asciidoc | 11 +- x-pack/docs/en/ml/transforms.asciidoc | 10 +- 17 files changed, 179 insertions(+), 74 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 6035e75ec1638..6061c7bd8fc24 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -9,13 +9,6 @@ apply plugin: 'elasticsearch.docs-test' * only remove entries from this list. When it is empty we'll remove it * entirely and have a party! There will be cake and everything.... */ buildRestTests.expectedUnconvertedCandidates = [ - 'en/ml/functions/count.asciidoc', - 'en/ml/functions/geo.asciidoc', - 'en/ml/functions/info.asciidoc', - 'en/ml/functions/metric.asciidoc', - 'en/ml/functions/rare.asciidoc', - 'en/ml/functions/sum.asciidoc', - 'en/ml/functions/time.asciidoc', 'en/rest-api/watcher/put-watch.asciidoc', 'en/security/authentication/user-cache.asciidoc', 'en/security/authorization/field-and-document-access-control.asciidoc', @@ -56,7 +49,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/watcher/troubleshooting.asciidoc', 'en/rest-api/license/delete-license.asciidoc', 'en/rest-api/license/update-license.asciidoc', - 'en/ml/api-quickref.asciidoc', 'en/rest-api/ml/delete-snapshot.asciidoc', 'en/rest-api/ml/forecast.asciidoc', 'en/rest-api/ml/get-bucket.asciidoc', diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/x-pack/docs/en/ml/aggregations.asciidoc index f3b8e6b3e34d6..5ff54b76f01b3 100644 --- a/x-pack/docs/en/ml/aggregations.asciidoc +++ b/x-pack/docs/en/ml/aggregations.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-configuring-aggregation]] -=== Aggregating Data For Faster Performance +=== Aggregating data for faster performance By default, {dfeeds} fetch data from {es} using search and scroll requests. 
It can be significantly more efficient, however, to aggregate data in {es} diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc index 9602379c37416..dc87a6ba209c2 100644 --- a/x-pack/docs/en/ml/api-quickref.asciidoc +++ b/x-pack/docs/en/ml/api-quickref.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-api-quickref]] -== API Quick Reference +== API quick reference All {ml} endpoints have the following base: @@ -7,6 +8,7 @@ All {ml} endpoints have the following base: ---- /_xpack/ml/ ---- +// NOTCONSOLE The main {ml} resources can be accessed with a variety of endpoints: diff --git a/x-pack/docs/en/ml/categories.asciidoc b/x-pack/docs/en/ml/categories.asciidoc index bb217e2e18654..21f71b871cbb9 100644 --- a/x-pack/docs/en/ml/categories.asciidoc +++ b/x-pack/docs/en/ml/categories.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[ml-configuring-categories]] === Categorizing log messages @@ -77,7 +78,7 @@ NOTE: To add the `categorization_examples_limit` property, you must use the [float] [[ml-configuring-analyzer]] -==== Customizing the Categorization Analyzer +==== Customizing the categorization analyzer Categorization uses English dictionary words to identify log message categories. By default, it also uses English tokenization rules. For this reason, if you use @@ -213,7 +214,7 @@ API examples above. [float] [[ml-viewing-categories]] -==== Viewing Categorization Results +==== Viewing categorization results After you open the job and start the {dfeed} or supply data to the job, you can view the categorization results in {kib}. For example: diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/x-pack/docs/en/ml/configuring.asciidoc index ba965a08b0462..c2c6e69a71128 100644 --- a/x-pack/docs/en/ml/configuring.asciidoc +++ b/x-pack/docs/en/ml/configuring.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-configuring]] -== Configuring Machine Learning +== Configuring machine learning If you want to use {xpackml} features, there must be at least one {ml} node in your cluster and all master-eligible nodes must have {ml} enabled. By default, diff --git a/x-pack/docs/en/ml/customurl.asciidoc b/x-pack/docs/en/ml/customurl.asciidoc index 7c773c4b9bf49..7c197084c0e5f 100644 --- a/x-pack/docs/en/ml/customurl.asciidoc +++ b/x-pack/docs/en/ml/customurl.asciidoc @@ -48,7 +48,7 @@ using the {ml} APIs. [float] [[ml-configuring-url-strings]] -==== String Substitution in Custom URLs +==== String substitution in custom URLs You can use dollar sign ($) delimited tokens in a custom URL. These tokens are substituted for the values of the corresponding fields in the anomaly records. diff --git a/x-pack/docs/en/ml/functions.asciidoc b/x-pack/docs/en/ml/functions.asciidoc index ae5f768e05697..e32470c6827b6 100644 --- a/x-pack/docs/en/ml/functions.asciidoc +++ b/x-pack/docs/en/ml/functions.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-functions]] -== Function Reference +== Function reference The {xpackml} features include analysis functions that provide a wide variety of flexible ways to analyze data for anomalies. diff --git a/x-pack/docs/en/ml/functions/count.asciidoc b/x-pack/docs/en/ml/functions/count.asciidoc index 4b70f80933dca..a2dc5645b61ae 100644 --- a/x-pack/docs/en/ml/functions/count.asciidoc +++ b/x-pack/docs/en/ml/functions/count.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-count-functions]] -=== Count Functions +=== Count functions Count functions detect anomalies when the number of events in a bucket is anomalous. 
@@ -21,7 +22,7 @@ The {xpackml} features include the following count functions: [float] [[ml-count]] -===== Count, High_count, Low_count +===== Count, high_count, low_count The `count` function detects anomalies when the number of events in a bucket is anomalous. @@ -44,8 +45,20 @@ see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects] .Example 1: Analyzing events with the count function [source,js] -------------------------------------------------- -{ "function" : "count" } +PUT _xpack/ml/anomaly_detectors/example1 +{ + "analysis_config": { + "detectors": [{ + "function" : "count" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } +} -------------------------------------------------- +// CONSOLE This example is probably the simplest possible analysis. It identifies time buckets during which the overall count of events is higher or lower than @@ -57,12 +70,22 @@ and detects when the event rate is unusual compared to its past behavior. .Example 2: Analyzing errors with the high_count function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example2 { - "function" : "high_count", - "by_field_name" : "error_code", - "over_field_name": "user" + "analysis_config": { + "detectors": [{ + "function" : "high_count", + "by_field_name" : "error_code", + "over_field_name": "user" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } } -------------------------------------------------- +// CONSOLE If you use this `high_count` function in a detector in your job, it models the event rate for each error code. It detects users that generate an @@ -72,11 +95,21 @@ unusually high count of error codes compared to other users. .Example 3: Analyzing status codes with the low_count function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example3 { - "function" : "low_count", - "by_field_name" : "status_code" + "analysis_config": { + "detectors": [{ + "function" : "low_count", + "by_field_name" : "status_code" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } } -------------------------------------------------- +// CONSOLE In this example, the function detects when the count of events for a status code is lower than usual. @@ -88,22 +121,30 @@ compared to its past behavior. .Example 4: Analyzing aggregated data with the count function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example4 { - "summary_count_field_name" : "events_per_min", - "detectors" [ - { "function" : "count" } - ] -} + "analysis_config": { + "summary_count_field_name" : "events_per_min", + "detectors": [{ + "function" : "count" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } +} -------------------------------------------------- +// CONSOLE If you are analyzing an aggregated `events_per_min` field, do not use a sum function (for example, `sum(events_per_min)`). Instead, use the count function -and the `summary_count_field_name` property. -//TO-DO: For more information, see <>. +and the `summary_count_field_name` property. For more information, see +<>. 
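When a {dfeed} aggregates the data before it reaches the job, the number of
raw documents in each bucket typically arrives in a `doc_count` field. A
hypothetical job following the same pattern would pair the count function
with that field (a sketch only, assuming an aggregated {dfeed}; the
`doc_count` pairing is conventional rather than taken from this patch):

[source,js]
--------------------------------------------------
{
  "analysis_config": {
    "summary_count_field_name" : "doc_count",
    "detectors": [{
      "function" : "count"
    }]
  },
  "data_description": {
    "time_field":"timestamp"
  }
}
--------------------------------------------------
// NOTCONSOLE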
[float] [[ml-nonzero-count]] -===== Non_zero_count, High_non_zero_count, Low_non_zero_count +===== Non_zero_count, high_non_zero_count, low_non_zero_count The `non_zero_count` function detects anomalies when the number of events in a bucket is anomalous, but it ignores cases where the bucket count is zero. Use @@ -144,11 +185,21 @@ The `non_zero_count` function models only the following data: .Example 5: Analyzing signatures with the high_non_zero_count function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example5 { - "function" : "high_non_zero_count", - "by_field_name" : "signaturename" + "analysis_config": { + "detectors": [{ + "function" : "high_non_zero_count", + "by_field_name" : "signaturename" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } } -------------------------------------------------- +// CONSOLE If you use this `high_non_zero_count` function in a detector in your job, it models the count of events for the `signaturename` field. It ignores any buckets @@ -163,7 +214,7 @@ data is sparse, use the `count` functions, which are optimized for that scenario [float] [[ml-distinct-count]] -===== Distinct_count, High_distinct_count, Low_distinct_count +===== Distinct_count, high_distinct_count, low_distinct_count The `distinct_count` function detects anomalies where the number of distinct values in one field is unusual. @@ -187,11 +238,21 @@ see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects] .Example 6: Analyzing users with the distinct_count function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example6 { - "function" : "distinct_count", - "field_name" : "user" + "analysis_config": { + "detectors": [{ + "function" : "distinct_count", + "field_name" : "user" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } } -------------------------------------------------- +// CONSOLE This `distinct_count` function detects when a system has an unusual number of logged in users. When you use this function in a detector in your job, it @@ -201,12 +262,22 @@ users is unusual compared to the past. .Example 7: Analyzing ports with the high_distinct_count function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example7 { - "function" : "high_distinct_count", - "field_name" : "dst_port", - "over_field_name": "src_ip" + "analysis_config": { + "detectors": [{ + "function" : "high_distinct_count", + "field_name" : "dst_port", + "over_field_name": "src_ip" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } } -------------------------------------------------- +// CONSOLE This example detects instances of port scanning. When you use this function in a detector in your job, it models the distinct count of ports. It also detects the diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/x-pack/docs/en/ml/functions/geo.asciidoc index cc98e95bf2069..e9685b46e1677 100644 --- a/x-pack/docs/en/ml/functions/geo.asciidoc +++ b/x-pack/docs/en/ml/functions/geo.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-geo-functions]] -=== Geographic Functions +=== Geographic functions The geographic functions detect anomalies in the geographic location of the input data. 
@@ -28,12 +29,22 @@ see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects] .Example 1: Analyzing transactions with the lat_long function [source,js] -------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/example1 { - "function" : "lat_long", - "field_name" : "transactionCoordinates", - "by_field_name" : "creditCardNumber" + "analysis_config": { + "detectors": [{ + "function" : "lat_long", + "field_name" : "transactionCoordinates", + "by_field_name" : "creditCardNumber" + }] + }, + "data_description": { + "time_field":"timestamp", + "time_format": "epoch_ms" + } } -------------------------------------------------- +// CONSOLE If you use this `lat_long` function in a detector in your job, it detects anomalies where the geographic location of a credit card transaction is @@ -54,6 +65,7 @@ For example, JSON data might contain the following transaction coordinates: "creditCardNumber": "1234123412341234" } -------------------------------------------------- +// NOTCONSOLE In {es}, location data is likely to be stored in `geo_point` fields. For more information, see {ref}/geo-point.html[Geo-point datatype]. This data type is not @@ -64,7 +76,15 @@ format. For example, the following Painless script transforms [source,js] -------------------------------------------------- +PUT _xpack/ml/datafeeds/datafeed-test2 { + "job_id": "farequote", + "indices": ["farequote"], + "query": { + "match_all": { + "boost": 1 + } + }, "script_fields": { "lat-lon": { "script": { @@ -75,5 +95,7 @@ format. For example, the following Painless script transforms } } -------------------------------------------------- +// CONSOLE +// TEST[setup:farequote_job] For more information, see <>. diff --git a/x-pack/docs/en/ml/functions/info.asciidoc b/x-pack/docs/en/ml/functions/info.asciidoc index f964d4eb3ec67..2c3117e0e5644 100644 --- a/x-pack/docs/en/ml/functions/info.asciidoc +++ b/x-pack/docs/en/ml/functions/info.asciidoc @@ -40,6 +40,7 @@ For more information about those properties, see "over_field_name" : "highest_registered_domain" } -------------------------------------------------- +// NOTCONSOLE If you use this `info_content` function in a detector in your job, it models information that is present in the `subdomain` string. It detects anomalies @@ -60,6 +61,7 @@ choice. "over_field_name" : "src_ip" } -------------------------------------------------- +// NOTCONSOLE If you use this `high_info_content` function in a detector in your job, it models information content that is held in the DNS query string. It detects @@ -77,6 +79,7 @@ information content is higher than expected. "by_field_name" : "logfilename" } -------------------------------------------------- +// NOTCONSOLE If you use this `low_info_content` function in a detector in your job, it models information content that is present in the message string for each diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/x-pack/docs/en/ml/functions/metric.asciidoc index 495fc6f333575..3ee5179702720 100644 --- a/x-pack/docs/en/ml/functions/metric.asciidoc +++ b/x-pack/docs/en/ml/functions/metric.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-metric-functions]] -=== Metric Functions +=== Metric functions The metric functions include functions such as mean, min and max. These values are calculated for each bucket. 
Field values that cannot be converted to @@ -42,6 +43,7 @@ For more information about those properties, see "by_field_name" : "product" } -------------------------------------------------- +// NOTCONSOLE If you use this `min` function in a detector in your job, it detects where the smallest transaction is lower than previously observed. You can use this @@ -76,6 +78,7 @@ For more information about those properties, see "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `max` function in a detector in your job, it detects where the longest `responsetime` is longer than previously observed. You can use this @@ -98,6 +101,7 @@ to previous applications. "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE The analysis in the previous example can be performed alongside `high_mean` functions by application. By combining detectors and using the same influencer @@ -106,7 +110,7 @@ response times for each bucket. [float] [[ml-metric-median]] -==== Median, High_median, Low_median +==== Median, high_median, low_median The `median` function detects anomalies in the statistical median of a value. The median value is calculated for each bucket. @@ -136,6 +140,7 @@ For more information about those properties, see "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `median` function in a detector in your job, it models the median `responsetime` for each application over time. It detects when the median @@ -143,7 +148,7 @@ median `responsetime` for each application over time. It detects when the median [float] [[ml-metric-mean]] -==== Mean, High_mean, Low_mean +==== Mean, high_mean, low_mean The `mean` function detects anomalies in the arithmetic mean of a value. The mean value is calculated for each bucket. @@ -173,6 +178,7 @@ For more information about those properties, see "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `mean` function in a detector in your job, it models the mean `responsetime` for each application over time. It detects when the mean @@ -187,6 +193,7 @@ If you use this `mean` function in a detector in your job, it models the mean "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `high_mean` function in a detector in your job, it models the mean `responsetime` for each application over time. It detects when the mean @@ -201,6 +208,7 @@ mean `responsetime` for each application over time. It detects when the mean "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `low_mean` function in a detector in your job, it models the mean `responsetime` for each application over time. It detects when the mean @@ -237,6 +245,7 @@ For more information about those properties, see "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `metric` function in a detector in your job, it models the mean, min, and max `responsetime` for each application over time. It detects @@ -245,7 +254,7 @@ when the mean, min, or max `responsetime` is unusual compared to previous [float] [[ml-metric-varp]] -==== Varp, High_varp, Low_varp +==== Varp, high_varp, low_varp The `varp` function detects anomalies in the variance of a value which is a measure of the variability and spread in the data. 
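For reference, assuming `varp` computes the usual population variance (as the name suggests), the
quantity modeled for the values x_1, ..., x_N in a bucket is:

[latexmath]
++++
\sigma^2 = \frac{1}{N} \sum_{i=1}^{N} \left(x_i - \mu\right)^2,
\qquad
\mu = \frac{1}{N} \sum_{i=1}^{N} x_i
++++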
@@ -273,6 +282,7 @@ For more information about those properties, see "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `varp` function in a detector in your job, it models the variance in values of `responsetime` for each application over time. It detects @@ -288,6 +298,7 @@ behavior. "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `high_varp` function in a detector in your job, it models the variance in values of `responsetime` for each application over time. It detects @@ -303,6 +314,7 @@ behavior. "by_field_name" : "application" } -------------------------------------------------- +// NOTCONSOLE If you use this `low_varp` function in a detector in your job, it models the variance in values of `responsetime` for each application over time. It detects diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/x-pack/docs/en/ml/functions/rare.asciidoc index 2485605557cfa..fc30918b508f1 100644 --- a/x-pack/docs/en/ml/functions/rare.asciidoc +++ b/x-pack/docs/en/ml/functions/rare.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-rare-functions]] -=== Rare Functions +=== Rare functions The rare functions detect values that occur rarely in time or rarely for a population. @@ -54,6 +55,7 @@ For more information about those properties, see "by_field_name" : "status" } -------------------------------------------------- +// NOTCONSOLE If you use this `rare` function in a detector in your job, it detects values that are rare in time. It models status codes that occur over time and detects @@ -69,6 +71,7 @@ status codes in a web access log that have never (or rarely) occurred before. "over_field_name" : "clientip" } -------------------------------------------------- +// NOTCONSOLE If you use this `rare` function in a detector in your job, it detects values that are rare in a population. It models status code and client IP interactions @@ -111,6 +114,7 @@ For more information about those properties, see "over_field_name" : "clientip" } -------------------------------------------------- +// NOTCONSOLE If you use this `freq_rare` function in a detector in your job, it detects values that are frequently rare in a population. It models URI paths and diff --git a/x-pack/docs/en/ml/functions/sum.asciidoc b/x-pack/docs/en/ml/functions/sum.asciidoc index 3a0f0b264e9ef..7a95ad63fccee 100644 --- a/x-pack/docs/en/ml/functions/sum.asciidoc +++ b/x-pack/docs/en/ml/functions/sum.asciidoc @@ -1,6 +1,6 @@ - +[role="xpack"] [[ml-sum-functions]] -=== Sum Functions +=== Sum functions The sum functions detect anomalies when the sum of a field in a bucket is anomalous. @@ -16,16 +16,9 @@ The {xpackml} features include the following sum functions: * xref:ml-sum[`sum`, `high_sum`, `low_sum`] * xref:ml-nonnull-sum[`non_null_sum`, `high_non_null_sum`, `low_non_null_sum`] -//// -TBD: Incorporate from prelert docs?: -Input data may contain pre-calculated fields giving the total count of some value e.g. transactions per minute. -Ensure you are familiar with our advice on Summarization of Input Data, as this is likely to provide -a more appropriate method to using the sum function. -//// - [float] [[ml-sum]] -==== Sum, High_sum, Low_sum +==== Sum, high_sum, low_sum The `sum` function detects anomalies where the sum of a field in a bucket is anomalous. 
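A hypothetical job using `sum`, mirroring the request shape of the earlier examples (the job name and
field names are invented for illustration):

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example_sum
{
  "analysis_config": {
    "detectors": [{
      "function" : "sum",
      "field_name" : "expenses",
      "over_field_name" : "employee"
    }]
  },
  "data_description": {
    "time_field":"timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------
// NOTCONSOLE

Here each employee's summed `expenses` per bucket would be compared against the population of all
employees, so individuals with unusually large totals stand out.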
@@ -54,6 +47,7 @@ For more information about those properties, see "over_field_name" : "employee" } -------------------------------------------------- +// NOTCONSOLE If you use this `sum` function in a detector in your job, it models total expenses per employees for each cost center. For each time bucket, @@ -69,6 +63,7 @@ to other employees. "over_field_name" : "cs_host" } -------------------------------------------------- +// NOTCONSOLE If you use this `high_sum` function in a detector in your job, it models total `cs_bytes`. It detects `cs_hosts` that transfer unusually high @@ -79,7 +74,7 @@ to find users that are abusing internet privileges. [float] [[ml-nonnull-sum]] -==== Non_null_sum, High_non_null_sum, Low_non_null_sum +==== Non_null_sum, high_non_null_sum, low_non_null_sum The `non_null_sum` function is useful if your data is sparse. Buckets without values are ignored and buckets with a zero value are analyzed. @@ -110,6 +105,7 @@ is not applicable for this function. "byFieldName" : "employee" } -------------------------------------------------- +// NOTCONSOLE If you use this `high_non_null_sum` function in a detector in your job, it models the total `amount_approved` for each employee. It ignores any buckets diff --git a/x-pack/docs/en/ml/functions/time.asciidoc b/x-pack/docs/en/ml/functions/time.asciidoc index a8067e2ca1342..ac8199307f130 100644 --- a/x-pack/docs/en/ml/functions/time.asciidoc +++ b/x-pack/docs/en/ml/functions/time.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-time-functions]] -=== Time Functions +=== Time functions The time functions detect events that happen at unusual times, either of the day or of the week. These functions can be used to find unusual patterns of behavior, @@ -60,6 +61,7 @@ For more information about those properties, see "by_field_name" : "process" } -------------------------------------------------- +// NOTCONSOLE If you use this `time_of_day` function in a detector in your job, it models when events occur throughout a day for each process. It detects when an @@ -91,6 +93,7 @@ For more information about those properties, see "over_field_name" : "workstation" } -------------------------------------------------- +// NOTCONSOLE If you use this `time_of_week` function in a detector in your job, it models when events occur throughout the week for each `eventcode`. 
It detects diff --git a/x-pack/docs/en/ml/populations.asciidoc b/x-pack/docs/en/ml/populations.asciidoc index 53e10ce8d41b6..bf0dd2ad7d7bb 100644 --- a/x-pack/docs/en/ml/populations.asciidoc +++ b/x-pack/docs/en/ml/populations.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-configuring-pop]] -=== Performing Population Analysis +=== Performing population analysis Entities or events in your data can be considered anomalous when: diff --git a/x-pack/docs/en/ml/stopping-ml.asciidoc b/x-pack/docs/en/ml/stopping-ml.asciidoc index 862fe5cf05061..c0be2d947cdc7 100644 --- a/x-pack/docs/en/ml/stopping-ml.asciidoc +++ b/x-pack/docs/en/ml/stopping-ml.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[stopping-ml]] -== Stopping Machine Learning +== Stopping machine learning An orderly shutdown of {ml} ensures that: @@ -24,10 +25,10 @@ request stops the `feed1` {dfeed}: [source,js] -------------------------------------------------- -POST _xpack/ml/datafeeds/feed1/_stop +POST _xpack/ml/datafeeds/datafeed-total-requests/_stop -------------------------------------------------- // CONSOLE -// TEST[skip:todo] +// TEST[setup:server_metrics_startdf] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. @@ -63,10 +64,10 @@ example, the following request closes the `job1` job: [source,js] -------------------------------------------------- -POST _xpack/ml/anomaly_detectors/job1/_close +POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[skip:todo] +// TEST[setup:server_metrics_openjob] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. diff --git a/x-pack/docs/en/ml/transforms.asciidoc b/x-pack/docs/en/ml/transforms.asciidoc index 9789518081be2..c4b4d56029748 100644 --- a/x-pack/docs/en/ml/transforms.asciidoc +++ b/x-pack/docs/en/ml/transforms.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ml-configuring-transform]] -=== Transforming Data With Script Fields +=== Transforming data with script fields If you use {dfeeds}, you can add scripts to transform your data before it is analyzed. {dfeeds-cap} contain an optional `script_fields` property, where @@ -602,10 +603,3 @@ The preview {dfeed} API returns the following results, which show that ] ---------------------------------- // TESTRESPONSE - -//// -==== Configuring Script Fields in {dfeeds-cap} - -//TO-DO: Add Kibana steps from -//https://github.com/elastic/prelert-legacy/wiki/Transforming-data-with-script_fields#transforming-geo_point-data-to-a-workable-string-format -//// From faa78e8cd5e38f92b99acb8128b11fed98d886e9 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 20 Jun 2018 08:43:08 +0200 Subject: [PATCH 09/17] Revert "Mute DefaultShardsIT#testDefaultShards test" This reverts commit 2dd7e3febb1e06db1bf96d62315c7edb7b948d1e. 
---
 .../test/java/org/elasticsearch/test/rest/DefaultShardsIT.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java
index 74edfbd189a4c..b40f2f1f85527 100644
--- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java
+++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java
@@ -31,7 +31,6 @@
 public class DefaultShardsIT extends ESRestTestCase {
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31408")
     public void testDefaultShards() throws IOException {
         final Response response = client().performRequest(new Request("PUT", "/index"));
         final String warning = response.getHeader("Warning");

From e644c0905ba29395bc34a02ec73c3319d30da5b9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Wed, 20 Jun 2018 10:23:39 +0200
Subject: [PATCH 10/17] Fix use of time zone in date_histogram rewrite (#31407)

Currently, DateHistogramAggregationBuilder#rewriteTimeZone uses the
aggregation date math parser and time zone to check whether all values in a
reader have the same timezone to speed up computation. However, the upper and
lower bounds to check are retrieved as longs in epoch_millis, so they don't
need to get parsed using a time zone or a parser other than "epoch_millis".
This changes the behaviour that was causing problems when the field type
mapping specified only "epoch_millis" as a format but a timezone other than
UTC was used.

Closes #31392
---
 .../DateHistogramAggregationBuilder.java | 14 ++++----
 .../aggregations/bucket/DateHistogramIT.java | 34 ++++++++++++++++++-
 2 files changed, 40 insertions(+), 8 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java
index bb391f21f1e40..bb785efde488e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java
@@ -25,6 +25,8 @@
 import org.apache.lucene.search.DocIdSetIterator;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.joda.DateMathParser;
+import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.rounding.DateTimeUnit;
 import org.elasticsearch.common.rounding.Rounding;
 import org.elasticsearch.common.unit.TimeValue;
@@ -36,7 +38,6 @@
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.Relation;
 import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
@@ -59,6 +60,7 @@
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 
@@ -70,6 +72,7 @@ public class DateHistogramAggregationBuilder extends
ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder {
 
     public static final String NAME = "date_histogram";
+    private static DateMathParser EPOCH_MILLIS_PARSER = new DateMathParser(Joda.forPattern("epoch_millis", Locale.ROOT));
 
     public static final Map DATE_FIELD_UNITS;
 
@@ -380,7 +383,7 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException {
             Long anyInstant = null;
             final IndexNumericFieldData fieldData = context.getForField(ft);
             for (LeafReaderContext ctx : reader.leaves()) {
-                AtomicNumericFieldData leafFD = ((IndexNumericFieldData) fieldData).load(ctx);
+                AtomicNumericFieldData leafFD = fieldData.load(ctx);
                 SortedNumericDocValues values = leafFD.getLongValues();
                 if (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                     anyInstant = values.nextValue();
@@ -406,11 +409,8 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException {
             // rounding rounds down, so 'nextTransition' is a good upper bound
             final long high = nextTransition;
 
-            final DocValueFormat format = ft.docValueFormat(null, null);
-            final Object formattedLow = format.format(low);
-            final Object formattedHigh = format.format(high);
-            if (ft.isFieldWithinQuery(reader, formattedLow, formattedHigh,
-                    true, false, tz, null, context) == Relation.WITHIN) {
+            if (ft.isFieldWithinQuery(reader, low, high, true, false, DateTimeZone.UTC, EPOCH_MILLIS_PARSER,
+                    context) == Relation.WITHIN) {
                 // All values in this reader have the same offset despite daylight saving times.
                 // This is very common for location-based timezones such as Europe/Paris in
                 // combination with time-based indices.
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
index a4a561cfee35f..26e6f4c076553 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
@@ -34,6 +34,7 @@
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
@@ -41,7 +42,6 @@
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
 import org.elasticsearch.search.aggregations.metrics.avg.Avg;
 import org.elasticsearch.search.aggregations.metrics.sum.Sum;
-import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
 import org.joda.time.DateTime;
@@ -1341,6 +1341,38 @@ public void testExceptionOnNegativeInterval() {
         }
     }
 
+    /**
+     * https://github.com/elastic/elasticsearch/issues/31392 demonstrates an edge case where a date field mapping with
+     * "format" = "epoch_millis" can cause the date histogram aggregation to throw an error if a non-UTC time zone
+     * with daylight saving time is used.
This test was added to check that this now works.
+     * @throws ExecutionException
+     * @throws InterruptedException
+     */
+    public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, ExecutionException {
+        String index = "test31392";
+        assertAcked(client().admin().indices().prepareCreate(index).addMapping("type", "d", "type=date,format=epoch_millis").get());
+        indexRandom(true, client().prepareIndex(index, "type").setSource("d", "1477954800000"));
+        ensureSearchable(index);
+        SearchResponse response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d")
+                .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin"))).execute().actionGet();
+        assertSearchResponse(response);
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo.getBuckets().size(), equalTo(1));
+        assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000"));
+        assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L));
+
+        response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d")
+                .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin")).format("yyyy-MM-dd"))
+                .execute().actionGet();
+        assertSearchResponse(response);
+        histo = response.getAggregations().get("histo");
+        assertThat(histo.getBuckets().size(), equalTo(1));
+        assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2016-11-01"));
+        assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L));
+
+        internalCluster().wipeIndices(index);
+    }
+
     /**
      * When DST ends, local time turns back one hour, so between 2am and 4am wall time we should have four buckets:
      * "2015-10-25T02:00:00.000+02:00",

From 6aba7ebeb1362f609d51e61e9a89f5283cec3e70 Mon Sep 17 00:00:00 2001
From: Alan Woodward
Date: Wed, 20 Jun 2018 10:16:26 +0100
Subject: [PATCH 11/17] Multiplexing token filter (#31208)

The `multiplexer` filter emits multiple tokens at the same position, each
version of the token having been passed through a different filter chain.
Identical tokens at the same position are removed.

This allows users to, for example, index lowercase and original-case tokens,
or stemmed and unstemmed versions, in the same field, so that they can search
for a stemmed term within x positions of an unstemmed term.
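For a flavour of the configuration this adds (a minimal sketch lifted from the shape of the
documentation below; the filter name `my_multiplexer` is arbitrary):

[source,js]
--------------------------------------------------
"filter" : {
  "my_multiplexer" : {
    "type" : "multiplexer",
    "filters" : [ "lowercase", "lowercase, porter_stem" ]
  }
}
--------------------------------------------------
// NOTCONSOLE

With `preserve_original` left at its default of `true`, each incoming token is emitted three times at
the same position: unchanged, lowercased, and lowercased then stemmed, with any duplicates collapsed.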
--- docs/reference/analysis/tokenfilters.asciidoc | 2 + .../multiplexer-tokenfilter.asciidoc | 116 +++++++++++ .../analysis/common/CommonAnalysisPlugin.java | 1 + .../common/MultiplexerTokenFilterFactory.java | 195 ++++++++++++++++++ .../common/MultiplexerTokenFilterTests.java | 106 ++++++++++ .../index/analysis/AnalysisRegistry.java | 13 +- .../analysis/ReferringFilterFactory.java | 37 ++++ .../index/analysis/AnalysisRegistryTests.java | 1 - 8 files changed, 469 insertions(+), 2 deletions(-) create mode 100644 docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc create mode 100644 modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterFactory.java create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterTests.java create mode 100644 server/src/main/java/org/elasticsearch/index/analysis/ReferringFilterFactory.java diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc index dd5cb2e702cff..ee891fdd09aa7 100644 --- a/docs/reference/analysis/tokenfilters.asciidoc +++ b/docs/reference/analysis/tokenfilters.asciidoc @@ -35,6 +35,8 @@ include::tokenfilters/word-delimiter-tokenfilter.asciidoc[] include::tokenfilters/word-delimiter-graph-tokenfilter.asciidoc[] +include::tokenfilters/multiplexer-tokenfilter.asciidoc[] + include::tokenfilters/stemmer-tokenfilter.asciidoc[] include::tokenfilters/stemmer-override-tokenfilter.asciidoc[] diff --git a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc new file mode 100644 index 0000000000000..51937084e3984 --- /dev/null +++ b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc @@ -0,0 +1,116 @@ +[[analysis-multiplexer-tokenfilter]] +=== Multiplexer Token Filter + +A token filter of type `multiplexer` will emit multiple tokens at the same position, +each version of the token having been run through a different filter. Identical +output tokens at the same position will be removed. + +WARNING: If the incoming token stream has duplicate tokens, then these will also be +removed by the multiplexer + +[float] +=== Options +[horizontal] +filters:: a list of token filters to apply to incoming tokens. These can be any + token filters defined elsewhere in the index mappings. Filters can be chained + using a comma-delimited string, so for example `"lowercase, porter_stem"` would + apply the `lowercase` filter and then the `porter_stem` filter to a single token. 
+ +WARNING: Shingle or multi-word synonym token filters will not function normally + when they are declared in the filters array because they read ahead internally + which is unsupported by the multiplexer + +preserve_original:: if `true` (the default) then emit the original token in + addition to the filtered tokens + + +[float] +=== Settings example + +You can set it up like: + +[source,js] +-------------------------------------------------- +PUT /multiplexer_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "my_analyzer" : { + "tokenizer" : "standard", + "filter" : [ "my_multiplexer" ] + } + }, + "filter" : { + "my_multiplexer" : { + "type" : "multiplexer", + "filters" : [ "lowercase", "lowercase, porter_stem" ] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +And test it like: + +[source,js] +-------------------------------------------------- +POST /multiplexer_example/_analyze +{ + "analyzer" : "my_analyzer", + "text" : "Going HOME" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And it'd respond: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "Going", + "start_offset": 0, + "end_offset": 5, + "type": "", + "position": 0 + }, + { + "token": "going", + "start_offset": 0, + "end_offset": 5, + "type": "", + "position": 0 + }, + { + "token": "go", + "start_offset": 0, + "end_offset": 5, + "type": "", + "position": 0 + }, + { + "token": "HOME", + "start_offset": 6, + "end_offset": 10, + "type": "", + "position": 1 + }, + { + "token": "home", <1> + "start_offset": 6, + "end_offset": 10, + "type": "", + "position": 1 + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +<1> The stemmer has also emitted a token `home` at position 1, but because it is a +duplicate of this token it has been removed from the token stream \ No newline at end of file diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index cdd8101a73c70..ca2f74b5efee0 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -226,6 +226,7 @@ public Map> getTokenFilters() { filters.put("limit", LimitTokenCountFilterFactory::new); filters.put("lowercase", LowerCaseTokenFilterFactory::new); filters.put("min_hash", MinHashTokenFilterFactory::new); + filters.put("multiplexer", MultiplexerTokenFilterFactory::new); filters.put("ngram", NGramTokenFilterFactory::new); filters.put("nGram", NGramTokenFilterFactory::new); filters.put("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterFactory.java new file mode 100644 index 0000000000000..1cf5303a77209 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterFactory.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter; +import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilter; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.ReferringFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory implements ReferringFilterFactory { + + private List filters; + private List filterNames; + private final boolean preserveOriginal; + + private static final TokenFilterFactory IDENTITY_FACTORY = new TokenFilterFactory() { + @Override + public String name() { + return "identity"; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return tokenStream; + } + }; + + public MultiplexerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException { + super(indexSettings, name, settings); + this.filterNames = settings.getAsList("filters"); + this.preserveOriginal = settings.getAsBoolean("preserve_original", true); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + List> functions = new ArrayList<>(); + for (TokenFilterFactory tff : filters) { + functions.add(tff::create); + } + return new RemoveDuplicatesTokenFilter(new MultiplexTokenFilter(tokenStream, functions)); + } + + @Override + public void setReferences(Map factories) { + filters = new ArrayList<>(); + if (preserveOriginal) { + filters.add(IDENTITY_FACTORY); + } + for (String filter : filterNames) { + String[] parts = Strings.tokenizeToStringArray(filter, ","); + if (parts.length == 1) { + filters.add(resolveFilterFactory(factories, parts[0])); + } else { + List chain = new ArrayList<>(); + for (String subfilter : parts) { + chain.add(resolveFilterFactory(factories, subfilter)); + } + filters.add(chainFilters(filter, chain)); + } + } + } + + private TokenFilterFactory chainFilters(String name, List filters) { + return new TokenFilterFactory() { + @Override + public String name() { + return name; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + for (TokenFilterFactory tff : filters) { + tokenStream = tff.create(tokenStream); + } + return tokenStream; + } + }; + } + + private TokenFilterFactory resolveFilterFactory(Map factories, String name) { + if 
(factories.containsKey(name) == false) { + throw new IllegalArgumentException("Multiplexing filter [" + name() + "] refers to undefined tokenfilter [" + name + "]"); + } else { + return factories.get(name); + } + } + + private final class MultiplexTokenFilter extends TokenFilter { + + private final TokenStream source; + private final int filterCount; + + private int selector; + + /** + * Creates a MultiplexTokenFilter on the given input with a set of filters + */ + MultiplexTokenFilter(TokenStream input, List> filters) { + super(input); + TokenStream source = new MultiplexerFilter(input); + for (int i = 0; i < filters.size(); i++) { + final int slot = i; + source = new ConditionalTokenFilter(source, filters.get(i)) { + @Override + protected boolean shouldFilter() { + return slot == selector; + } + }; + } + this.source = source; + this.filterCount = filters.size(); + this.selector = filterCount - 1; + } + + @Override + public boolean incrementToken() throws IOException { + return source.incrementToken(); + } + + @Override + public void end() throws IOException { + source.end(); + } + + @Override + public void reset() throws IOException { + source.reset(); + } + + private final class MultiplexerFilter extends TokenFilter { + + State state; + PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + + private MultiplexerFilter(TokenStream input) { + super(input); + } + + @Override + public boolean incrementToken() throws IOException { + if (selector >= filterCount - 1) { + selector = 0; + if (input.incrementToken() == false) { + return false; + } + state = captureState(); + return true; + } + restoreState(state); + posIncAtt.setPositionIncrement(0); + selector++; + return true; + } + + @Override + public void reset() throws IOException { + super.reset(); + selector = filterCount - 1; + this.state = null; + } + } + + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterTests.java new file mode 100644 index 0000000000000..c39fa05c26f72 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/MultiplexerTokenFilterTests.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.util.Collections; + +public class MultiplexerTokenFilterTests extends ESTokenStreamTestCase { + + public void testMultiplexingFilter() throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.filter.t.type", "truncate") + .put("index.analysis.filter.t.length", "2") + .put("index.analysis.filter.multiplexFilter.type", "multiplexer") + .putList("index.analysis.filter.multiplexFilter.filters", "lowercase, t", "uppercase") + .put("index.analysis.analyzer.myAnalyzer.type", "custom") + .put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.myAnalyzer.filter", "multiplexFilter") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings); + + try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) { + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "ONe tHree", new String[]{ + "ONe", "on", "ONE", "tHree", "th", "THREE" + }, new int[]{ + 1, 0, 0, 1, 0, 0 + }); + // Duplicates are removed + assertAnalyzesTo(analyzer, "ONe THREE", new String[]{ + "ONe", "on", "ONE", "THREE", "th" + }, new int[]{ + 1, 0, 0, 1, 0, 0 + }); + } + } + + public void testMultiplexingNoOriginal() throws IOException { + + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.filter.t.type", "truncate") + .put("index.analysis.filter.t.length", "2") + .put("index.analysis.filter.multiplexFilter.type", "multiplexer") + .put("index.analysis.filter.multiplexFilter.preserve_original", "false") + .putList("index.analysis.filter.multiplexFilter.filters", "lowercase, t", "uppercase") + .put("index.analysis.analyzer.myAnalyzer.type", "custom") + .put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.myAnalyzer.filter", "multiplexFilter") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings); + + try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) { + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "ONe tHree", new String[]{ + "on", "ONE", "th", "THREE" + }, new int[]{ + 1, 
0, 1, 0, + }); + } + + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 75cbc45af574a..82b7e533859a2 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -172,7 +172,18 @@ public Map buildTokenFilterFactories(IndexSettings i */ tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings))); tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings))); - return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters); + + Map mappings + = buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters); + + // ReferringTokenFilters require references to other tokenfilters, so we pass these in + // after all factories have been registered + for (TokenFilterFactory tff : mappings.values()) { + if (tff instanceof ReferringFilterFactory) { + ((ReferringFilterFactory)tff).setReferences(mappings); + } + } + return mappings; } public Map buildTokenizerFactories(IndexSettings indexSettings) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ReferringFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/ReferringFilterFactory.java new file mode 100644 index 0000000000000..9eb9bc2dbd653 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/analysis/ReferringFilterFactory.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import java.util.Map; + +/** + * Marks a {@link TokenFilterFactory} that refers to other filter factories. 
+ * + * The analysis registry will call {@link #setReferences(Map)} with a map of all + * available TokenFilterFactories after all factories have been registered + */ +public interface ReferringFilterFactory { + + /** + * Called with a map of all registered filter factories + */ + void setReferences(Map factories); + +} diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 36da9761b978d..26a5b87866c21 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.analysis; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.en.EnglishAnalyzer; From 730040bc3c62a0d557f32c0bec24b04745b03556 Mon Sep 17 00:00:00 2001 From: Peter Dyson Date: Wed, 20 Jun 2018 21:04:03 +1000 Subject: [PATCH 12/17] =?UTF-8?q?[Docs]=C2=A0Mention=20ip=5Frange=20dataty?= =?UTF-8?q?pes=20on=20ip=20type=20page=20(#31416)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A link to the ip_range datatype page provides a way for newer users to know it exists if they land directly on the ip datatype page first via a search. --- docs/reference/mapping/types/ip.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 512b0d725457e..695cd1c626bc3 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -36,6 +36,8 @@ GET my_index/_search // CONSOLE // TESTSETUP +NOTE: You can also store ip ranges in a single field using an <>. + [[ip-params]] ==== Parameters for `ip` fields From ee023b29afde4785caeebf3aa08aa00925931b63 Mon Sep 17 00:00:00 2001 From: Jonathan Pool Date: Wed, 20 Jun 2018 08:18:55 -0400 Subject: [PATCH 13/17] [Docs] Extend Homebrew installation instructions (#28902) Adding a note about proceeding after a successful homebrew installation. --- docs/reference/getting-started.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 937917823f5a6..b0889a723f511 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -158,6 +158,9 @@ On macOS, Elasticsearch can also be installed via https://brew.sh[Homebrew]: brew install elasticsearch -------------------------------------------------- +If installation succeeds, Homebrew will finish by saying that you can start Elasticsearch by entering +`elasticsearch`. Do that now. The expected response is described below, under <>. 
+
 [float]
 === Installation example with MSI Windows Installer
 
@@ -216,6 +219,7 @@ And now we are ready to start our node and single cluster:
 --------------------------------------------------
 
 [float]
+[[successfully-running-node]]
 === Successfully running node
 
 If everything goes well with installation, you should see a bunch of messages that look like below:

From a270984c5e7efbcee1e6db51b295b98064491685 Mon Sep 17 00:00:00 2001
From: Tal Levy
Date: Wed, 20 Jun 2018 08:25:59 -0700
Subject: [PATCH 14/17] backport of: Add rollover-creation-date setting to
 rolled over index (#31144) (#31413)

This commit introduces a new property to IndexMetaData called RolloverInfo.
This object contains a map containing the aliases that were used to roll over
the related index, which conditions were met, and at what time the rollover
took place.

Much like the `index.creation_date` setting, it captures the approximate time
that the index was rolled over to a new one.

The version serialization check is set to 6.4.
---
 .../indices/rollover/MaxAgeCondition.java | 9 ++
 .../indices/rollover/MaxDocsCondition.java | 9 ++
 .../indices/rollover/MaxSizeCondition.java | 9 ++
 .../admin/indices/rollover/RolloverInfo.java | 134 ++++++++++++++++++
 .../rollover/TransportRolloverAction.java | 39 +++--
 .../client/transport/TransportClient.java | 3 +
 .../cluster/metadata/IndexMetaData.java | 69 ++++++++-
 .../elasticsearch/indices/IndicesModule.java | 23 ++-
 .../java/org/elasticsearch/node/Node.java | 1 +
 .../admin/indices/rollover/RolloverIT.java | 25 +++-
 .../cluster/metadata/IndexMetaDataTests.java | 62 ++++++--
 11 files changed, 356 insertions(+), 27 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java
index c0b0d2a3297da..bf6c9e2f69592 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 
 import java.io.IOException;
 
@@ -64,4 +65,12 @@ public void writeTo(StreamOutput out) throws IOException {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         return builder.field(NAME, value.getStringRep());
     }
+
+    public static MaxAgeCondition fromXContent(XContentParser parser) throws IOException {
+        if (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+            return new MaxAgeCondition(TimeValue.parseTimeValue(parser.text(), NAME));
+        } else {
+            throw new IllegalArgumentException("invalid token: " + parser.currentToken());
+        }
+    }
 }

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java
index 8fddb870e59e9..2f897fa6a0175 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -61,4 +62,12 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.field(NAME, value); } + + public static MaxDocsCondition fromXContent(XContentParser parser) throws IOException { + if (parser.nextToken() == XContentParser.Token.VALUE_NUMBER) { + return new MaxDocsCondition(parser.longValue()); + } else { + throw new IllegalArgumentException("invalid token: " + parser.currentToken()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java index bb6f37634ce87..f1a121a87d41e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -70,4 +71,12 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.field(NAME, value.getStringRep()); } + + public static MaxSizeCondition fromXContent(XContentParser parser) throws IOException { + if (parser.nextToken() == XContentParser.Token.VALUE_STRING) { + return new MaxSizeCondition(ByteSizeValue.parseBytesSizeValue(parser.text(), NAME)); + } else { + throw new IllegalArgumentException("invalid token: " + parser.currentToken()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java new file mode 100644 index 0000000000000..291dd3a0ddae7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * Class for holding Rollover related information within an index + */ +public class RolloverInfo extends AbstractDiffable implements Writeable, ToXContentFragment { + + public static final ParseField CONDITION_FIELD = new ParseField("met_conditions"); + public static final ParseField TIME_FIELD = new ParseField("time"); + + @SuppressWarnings("unchecked") + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("rollover_info", false, + (a, alias) -> new RolloverInfo(alias, (List) a[0], (Long) a[1])); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), + (p, c, n) -> p.namedObject(Condition.class, n, c), CONDITION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_FIELD); + } + + private final String alias; + private final List metConditions; + private final long time; + + public RolloverInfo(String alias, List metConditions, long time) { + this.alias = alias; + this.metConditions = metConditions; + this.time = time; + } + + public RolloverInfo(StreamInput in) throws IOException { + this.alias = in.readString(); + this.time = in.readVLong(); + this.metConditions = in.readNamedWriteableList(Condition.class); + } + + public static RolloverInfo parse(XContentParser parser, String alias) { + return PARSER.apply(parser, alias); + } + + public String getAlias() { + return alias; + } + + public List getMetConditions() { + return metConditions; + } + + public long getTime() { + return time; + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(RolloverInfo::new, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(alias); + out.writeVLong(time); + out.writeNamedWriteableList(metConditions); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(alias); + builder.startObject(CONDITION_FIELD.getPreferredName()); + for (Condition condition : metConditions) { + condition.toXContent(builder, params); + } + builder.endObject(); + builder.field(TIME_FIELD.getPreferredName(), time); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(alias, metConditions, time); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RolloverInfo other = (RolloverInfo) obj; + return Objects.equals(alias, other.alias) && + Objects.equals(metConditions, other.metConditions) && + Objects.equals(time, other.time); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 10c491820763c..09e4f474c0d16 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasAction; @@ -131,7 +132,9 @@ public void onResponse(IndicesStatsResponse statsResponse) { new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false)); return; } - if (conditionResults.size() == 0 || conditionResults.values().stream().anyMatch(result -> result)) { + List metConditions = rolloverRequest.getConditions().values().stream() + .filter(condition -> conditionResults.get(condition.toString())).collect(Collectors.toList()); + if (conditionResults.size() == 0 || metConditions.size() > 0) { CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(unresolvedName, rolloverIndexName, rolloverRequest); createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> { @@ -141,13 +144,33 @@ public void onResponse(IndicesStatsResponse statsResponse) { rolloverRequest), ActionListener.wrap(aliasClusterStateUpdateResponse -> { if (aliasClusterStateUpdateResponse.isAcknowledged()) { - activeShardsObserver.waitForActiveShards(new String[]{rolloverIndexName}, - rolloverRequest.getCreateIndexRequest().waitForActiveShards(), - rolloverRequest.masterNodeTimeout(), - isShardsAcknowledged -> listener.onResponse(new RolloverResponse( - sourceIndexName, rolloverIndexName, conditionResults, false, true, true, - isShardsAcknowledged)), - listener::onFailure); + clusterService.submitStateUpdateTask("update_rollover_info", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + RolloverInfo rolloverInfo = new RolloverInfo(rolloverRequest.getAlias(), metConditions, + threadPool.absoluteTimeInMillis()); + return ClusterState.builder(currentState) + .metaData(MetaData.builder(currentState.metaData()) + .put(IndexMetaData.builder(currentState.metaData().index(sourceIndexName)) + .putRolloverInfo(rolloverInfo))).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + activeShardsObserver.waitForActiveShards(new String[]{rolloverIndexName}, + rolloverRequest.getCreateIndexRequest().waitForActiveShards(), + rolloverRequest.masterNodeTimeout(), + isShardsAcknowledged -> listener.onResponse(new RolloverResponse( + sourceIndexName, rolloverIndexName, conditionResults, false, true, true, + isShardsAcknowledged)), + listener::onFailure); + } + }); } else { listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, false, true, false, false)); diff --git 
diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java
index 50dad09dfa7b2..94bf912020806 100644
--- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java
+++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -46,6 +46,7 @@
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.PageCacheRecycler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.indices.IndicesModule;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.node.InternalSettingsPreparer;
 import org.elasticsearch.node.Node;
@@ -149,9 +150,11 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings
             SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
 
             SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class));
+            IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
             List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
             entries.addAll(NetworkModule.getNamedWriteables());
             entries.addAll(searchModule.getNamedWriteables());
+            entries.addAll(indicesModule.getNamedWriteables());
             entries.addAll(ClusterModule.getNamedWriteables());
             entries.addAll(pluginsService.filterPlugins(Plugin.class).stream()
                 .flatMap(p -> p.getNamedWriteables().stream())
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index ed5ab46069df0..17cf34c1e8736 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -25,6 +25,7 @@
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.Diffable;
@@ -295,6 +296,7 @@ public Iterator<Setting<?>> settings() {
     static final String KEY_STATE = "state";
     static final String KEY_MAPPINGS = "mappings";
     static final String KEY_ALIASES = "aliases";
+    static final String KEY_ROLLOVER_INFOS = "rollover_info";
     public static final String KEY_PRIMARY_TERMS = "primary_terms";
 
     public static final String INDEX_STATE_FILE_PREFIX = "state-";
@@ -332,13 +334,14 @@
     private final Version indexUpgradedVersion;
     private final ActiveShardCount waitForActiveShards;
+    private final ImmutableOpenMap<String, RolloverInfo> rolloverInfos;
 
     private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas,
                           Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings,
                           ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs,
                           ImmutableOpenIntMap<Set<String>> inSyncAllocationIds, DiscoveryNodeFilters requireFilters,
                           DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
                           Version indexCreatedVersion, Version indexUpgradedVersion,
-                          int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards) {
+                          int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap<String, RolloverInfo> rolloverInfos) {
 
         this.index = index;
         this.version = version;
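The remaining `IndexMetaData` hunks follow a single pattern worth calling out: every new read and write of `rolloverInfos` is guarded by the wire version, so a 6.4 node never puts the field on the wire to an older node. A self-contained sketch of the guard; the method name and parameters here are invented for illustration:

[source,java]
----------------------------------------------------------------
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamOutput;

class VersionGateSketch {
    static void maybeWriteNewSection(StreamOutput out, int entryCount) throws IOException {
        // Writers check the receiver's version before emitting the new section...
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeVInt(entryCount);
            // ...followed by one entry per RolloverInfo, as in the hunks below.
        }
        // Readers mirror the same check, so pre-6.4.0 peers never see the field.
    }
}
----------------------------------------------------------------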
@@ -363,6 +366,7 @@ private IndexMetaData(Index index, long version, long[] primaryTerms, State stat
         this.routingFactor = routingNumShards / numberOfShards;
         this.routingPartitionSize = routingPartitionSize;
         this.waitForActiveShards = waitForActiveShards;
+        this.rolloverInfos = rolloverInfos;
         assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards;
     }
 
@@ -518,6 +522,10 @@ public ImmutableOpenIntMap<Set<String>> getInSyncAllocationIds() {
         return inSyncAllocationIds;
     }
 
+    public ImmutableOpenMap<String, RolloverInfo> getRolloverInfos() {
+        return rolloverInfos;
+    }
+
     public Set<String> inSyncAllocationIds(int shardId) {
         assert shardId >= 0 && shardId < numberOfShards;
         return inSyncAllocationIds.get(shardId);
@@ -588,6 +596,9 @@ public boolean equals(Object o) {
         if (!inSyncAllocationIds.equals(that.inSyncAllocationIds)) {
             return false;
         }
+        if (rolloverInfos.equals(that.rolloverInfos) == false) {
+            return false;
+        }
         return true;
     }
 
@@ -604,6 +615,7 @@ public int hashCode() {
         result = 31 * result + Long.hashCode(routingNumShards);
         result = 31 * result + Arrays.hashCode(primaryTerms);
         result = 31 * result + inSyncAllocationIds.hashCode();
+        result = 31 * result + rolloverInfos.hashCode();
         return result;
     }
 
@@ -639,6 +651,7 @@ private static class IndexMetaDataDiff implements Diff<IndexMetaData> {
         private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
         private final Diff<ImmutableOpenMap<String, Custom>> customs;
         private final Diff<ImmutableOpenIntMap<Set<String>>> inSyncAllocationIds;
+        private final Diff<ImmutableOpenMap<String, RolloverInfo>> rolloverInfos;
 
         IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
             index = after.index.getName();
@@ -652,6 +665,7 @@ private static class IndexMetaDataDiff implements Diff<IndexMetaData> {
             customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
             inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds,
                 DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
+            rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer());
         }
 
         IndexMetaDataDiff(StreamInput in) throws IOException {
@@ -680,6 +694,13 @@ public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                 });
             inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
                 DiffableUtils.StringSetValueSerializer.getInstance());
+            if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+                rolloverInfos = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), RolloverInfo::new,
+                    RolloverInfo::readDiffFrom);
+            } else {
+                ImmutableOpenMap<String, RolloverInfo> emptyMap = ImmutableOpenMap.of();
+                rolloverInfos = DiffableUtils.diff(emptyMap, emptyMap, DiffableUtils.getStringKeySerializer());
+            }
         }
 
         @Override
@@ -694,6 +715,9 @@ public void writeTo(StreamOutput out) throws IOException {
             aliases.writeTo(out);
             customs.writeTo(out);
             inSyncAllocationIds.writeTo(out);
+            if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+                rolloverInfos.writeTo(out);
+            }
         }
 
         @Override
@@ -708,6 +732,7 @@ public IndexMetaData apply(IndexMetaData part) {
             builder.aliases.putAll(aliases.apply(part.aliases));
             builder.customs.putAll(customs.apply(part.customs));
             builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds));
+            builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos));
             return builder.build();
         }
     }
 
@@ -741,6 +766,12 @@ public static IndexMetaData readFrom(StreamInput in) throws IOException {
             Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
             builder.putInSyncAllocationIds(key, allocationIds);
         }
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+            int rolloverAliasesSize = in.readVInt();
+            for (int i = 0; i < rolloverAliasesSize; i++) {
+                builder.putRolloverInfo(new RolloverInfo(in));
+            }
+        }
         return builder.build();
     }
 
@@ -770,6 +801,12 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeVInt(cursor.key);
             DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
         }
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+            out.writeVInt(rolloverInfos.size());
+            for (ObjectCursor<RolloverInfo> cursor : rolloverInfos.values()) {
+                cursor.value.writeTo(out);
+            }
+        }
     }
 
     public static Builder builder(String index) {
@@ -791,6 +828,7 @@ public static class Builder {
         private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
         private final ImmutableOpenMap.Builder<String, Custom> customs;
         private final ImmutableOpenIntMap.Builder<Set<String>> inSyncAllocationIds;
+        private final ImmutableOpenMap.Builder<String, RolloverInfo> rolloverInfos;
         private Integer routingNumShards;
 
         public Builder(String index) {
@@ -799,6 +837,7 @@ public Builder(String index) {
             this.aliases = ImmutableOpenMap.builder();
             this.customs = ImmutableOpenMap.builder();
             this.inSyncAllocationIds = ImmutableOpenIntMap.builder();
+            this.rolloverInfos = ImmutableOpenMap.builder();
         }
 
         public Builder(IndexMetaData indexMetaData) {
@@ -812,6 +851,7 @@ public Builder(IndexMetaData indexMetaData) {
             this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
             this.routingNumShards = indexMetaData.routingNumShards;
             this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds);
+            this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos);
         }
 
         public String index() {
@@ -952,6 +992,15 @@ public Builder putInSyncAllocationIds(int shardId, Set<String> allocationIds) {
             return this;
         }
 
+        public RolloverInfo getRolloverInfo(String alias) {
+            return rolloverInfos.get(alias);
+        }
+
+        public Builder putRolloverInfo(RolloverInfo rolloverInfo) {
+            rolloverInfos.put(rolloverInfo.getAlias(), rolloverInfo);
+            return this;
+        }
+
        public long version() {
             return this.version;
         }
@@ -1090,7 +1139,7 @@ public IndexMetaData build() {
             return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas,
                 tmpSettings, mappings.build(), tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(),
                 requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
-                indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards);
+                indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build());
         }
 
         public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
@@ -1144,6 +1193,12 @@ public static void toXContent(IndexMetaData indexMetaData, XContentBuilder build
             }
             builder.endObject();
 
+            builder.startObject(KEY_ROLLOVER_INFOS);
+            for (ObjectCursor<RolloverInfo> cursor : indexMetaData.getRolloverInfos().values()) {
+                cursor.value.toXContent(builder, params);
+            }
+            builder.endObject();
+
             builder.endObject();
         }
 
@@ -1203,6 +1258,16 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti
                             throw new IllegalArgumentException("Unexpected token: " + token);
                         }
                     }
+                } else if (KEY_ROLLOVER_INFOS.equals(currentFieldName)) {
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                        if (token == XContentParser.Token.FIELD_NAME) {
+                            currentFieldName = parser.currentName();
+                        } else if (token == XContentParser.Token.START_OBJECT) {
+                            builder.putRolloverInfo(RolloverInfo.parse(parser, currentFieldName));
+                        } else {
+                            throw new IllegalArgumentException("Unexpected token: " + token);
+                        }
+                    }
                 } else if ("warmers".equals(currentFieldName)) {
                     // TODO: do this in 6.0:
                     // throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java
index a7613e34f6973..2159b5b53e0c3 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -24,9 +24,12 @@
 import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
 import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
 import org.elasticsearch.action.resync.TransportResyncReplicationAction;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.geo.ShapesAvailability;
 import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.mapper.AllFieldMapper;
@@ -65,6 +68,7 @@
 import org.elasticsearch.plugins.MapperPlugin;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedHashMap;
@@ -89,15 +93,26 @@ public IndicesModule(List<MapperPlugin> mapperPlugins) {
     }
 
     private void registerBuiltinWritables() {
-        namedWritables.add(new Entry(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new));
-        namedWritables.add(new Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new));
-        namedWritables.add(new Entry(Condition.class, MaxSizeCondition.NAME, MaxSizeCondition::new));
+        namedWritables.add(new NamedWriteableRegistry.Entry(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new));
+        namedWritables.add(new NamedWriteableRegistry.Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new));
+        namedWritables.add(new NamedWriteableRegistry.Entry(Condition.class, MaxSizeCondition.NAME, MaxSizeCondition::new));
     }
 
-    public List<Entry> getNamedWriteables() {
+    public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
         return namedWritables;
     }
 
+    public List<NamedXContentRegistry.Entry> getNamedXContents() {
+        return Arrays.asList(
+            new NamedXContentRegistry.Entry(Condition.class, new ParseField(MaxAgeCondition.NAME), (p, c) ->
+                MaxAgeCondition.fromXContent(p)),
+            new NamedXContentRegistry.Entry(Condition.class, new ParseField(MaxDocsCondition.NAME), (p, c) ->
+                MaxDocsCondition.fromXContent(p)),
+            new NamedXContentRegistry.Entry(Condition.class, new ParseField(MaxSizeCondition.NAME), (p, c) ->
+                MaxSizeCondition.fromXContent(p))
+        );
+    }
+
     private Map<String, Mapper.TypeParser> getMappers(List<MapperPlugin> mapperPlugins) {
         Map<String, Mapper.TypeParser> mappers = new LinkedHashMap<>();
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index 0eba4a3c5c10b..73e95160ca7da 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -391,6 +391,7 @@ protected Node(final Environment environment, Collection
             final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
             NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of(
                 NetworkModule.getNamedXContents().stream(),
+                indicesModule.getNamedXContents().stream(),
                 searchModule.getNamedXContents().stream(),
                 pluginsService.filterPlugins(Plugin.class).stream()
                     .flatMap(p -> p.getNamedXContent().stream()),
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
index 869bba452fefe..aa35d9d273a92 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
@@ -37,6 +37,7 @@
 
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.Set;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -44,6 +45,10 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.everyItem;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.collection.IsEmptyCollection.empty;
+import static org.hamcrest.core.CombinableMatcher.both;
+import static org.hamcrest.number.OrderingComparison.greaterThanOrEqualTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
 public class RolloverIT extends ESIntegTestCase {
@@ -70,6 +75,7 @@ public void testRolloverOnEmptyIndex() throws Exception {
     }
 
     public void testRollover() throws Exception {
+        long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
         assertAcked(prepareCreate("test_index-2").addAlias(new Alias("test_alias")).get());
         index("test_index-2", "type1", "1", "field", "value");
         flush("test_index-2");
@@ -84,6 +90,11 @@ public void testRollover() throws Exception {
         assertFalse(oldIndex.getAliases().containsKey("test_alias"));
         final IndexMetaData newIndex = state.metaData().index("test_index-000003");
         assertTrue(newIndex.getAliases().containsKey("test_alias"));
+        assertThat(oldIndex.getRolloverInfos().size(), equalTo(1));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getAlias(), equalTo("test_alias"));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getMetConditions(), is(empty()));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getTime(),
+            is(both(greaterThanOrEqualTo(beforeTime)).and(lessThanOrEqualTo(client().threadPool().absoluteTimeInMillis() + 1000L))));
     }
 
     public void testRolloverWithIndexSettings() throws Exception {
@@ -246,17 +257,27 @@ public void testRolloverMaxSize() throws Exception {
         assertThat(response.getOldIndex(), equalTo("test-1"));
         assertThat(response.getNewIndex(), equalTo("test-000002"));
         assertThat("No rollover with a large max_size condition", response.isRolledOver(), equalTo(false));
+        final IndexMetaData oldIndex = client().admin().cluster().prepareState().get().getState().metaData().index("test-1");
+        assertThat(oldIndex.getRolloverInfos().size(), equalTo(0));
     }
 
     // A small max_size
     {
+        ByteSizeValue maxSizeValue = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
+        long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
         final RolloverResponse response = client().admin().indices()
             .prepareRolloverIndex("test_alias")
-            .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES))
+            .addMaxIndexSizeCondition(maxSizeValue)
             .get();
         assertThat(response.getOldIndex(), equalTo("test-1"));
         assertThat(response.getNewIndex(), equalTo("test-000002"));
         assertThat("Should rollover with a small max_size condition", response.isRolledOver(), equalTo(true));
+        final IndexMetaData oldIndex = client().admin().cluster().prepareState().get().getState().metaData().index("test-1");
+        List<Condition> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions();
+        assertThat(metConditions.size(), equalTo(1));
+        assertThat(metConditions.get(0).toString(), equalTo(new MaxSizeCondition(maxSizeValue).toString()));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getTime(),
+            is(both(greaterThanOrEqualTo(beforeTime)).and(lessThanOrEqualTo(client().threadPool().absoluteTimeInMillis() + 1000L))));
     }
 
     // An empty index
@@ -268,6 +289,8 @@ public void testRolloverMaxSize() throws Exception {
         assertThat(response.getOldIndex(), equalTo("test-000002"));
         assertThat(response.getNewIndex(), equalTo("test-000003"));
         assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false));
+        final IndexMetaData oldIndex = client().admin().cluster().prepareState().get().getState().metaData().index("test-000002");
+        assertThat(oldIndex.getRolloverInfos().size(), equalTo(0));
     }
 }
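The integration test above boils down to one lookup that is useful on its own. Assuming an `ESIntegTestCase` context and an alias `test_alias` on `test-1` that has already rolled over, the recorded metadata can be read back like this; a sketch mirroring the assertions above, not new functionality:

[source,java]
----------------------------------------------------------------
import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.test.ESIntegTestCase;

public class RolloverInfoLookupSketch extends ESIntegTestCase {
    public void testReadBackRolloverInfo() {
        // The *old* index keeps the record of which conditions fired and when.
        IndexMetaData oldIndex = client().admin().cluster().prepareState().get()
            .getState().metaData().index("test-1");
        RolloverInfo info = oldIndex.getRolloverInfos().get("test_alias");
        assertEquals("test_alias", info.getAlias());
        assertFalse(info.getMetConditions().isEmpty());
        assertTrue(info.getTime() > 0);
    }
}
----------------------------------------------------------------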
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
index 7734a9d7b4e6a..9e8a5e04f43c1 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
@@ -19,18 +19,31 @@
 
 package org.elasticsearch.cluster.metadata;
 
+import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
+import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
+import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
+import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesModule;
 import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Set;
 
@@ -38,6 +51,23 @@
 
 public class IndexMetaDataTests extends ESTestCase {
 
+    private IndicesModule INDICES_MODULE = new IndicesModule(Collections.emptyList());
+
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+    }
+
+    @Override
+    protected NamedWriteableRegistry writableRegistry() {
+        return new NamedWriteableRegistry(INDICES_MODULE.getNamedWriteables());
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        return new NamedXContentRegistry(INDICES_MODULE.getNamedXContents());
+    }
+
     public void testIndexMetaDataSerialization() throws IOException {
         Integer numShard = randomFrom(1, 2, 4, 8, 16);
         int numberOfReplicas = randomIntBetween(0, 10);
@@ -50,7 +80,12 @@ public void testIndexMetaDataSerialization() throws IOException {
             .creationDate(randomLong())
             .primaryTerm(0, 2)
             .setRoutingNumShards(32)
-            .build();
+            .putRolloverInfo(
+                new RolloverInfo(randomAlphaOfLength(5),
+                    Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())),
+                        new MaxSizeCondition(new ByteSizeValue(randomNonNegativeLong())),
+                        new MaxDocsCondition(randomNonNegativeLong())),
+                    randomNonNegativeLong())).build();
 
         final XContentBuilder builder = JsonXContent.contentBuilder();
         builder.startObject();
@@ -71,17 +106,20 @@ public void testIndexMetaDataSerialization() throws IOException {
 
         final BytesStreamOutput out = new BytesStreamOutput();
         metaData.writeTo(out);
-        IndexMetaData deserialized = IndexMetaData.readFrom(out.bytes().streamInput());
-        assertEquals(metaData, deserialized);
-        assertEquals(metaData.hashCode(), deserialized.hashCode());
-
-        assertEquals(metaData.getNumberOfReplicas(), deserialized.getNumberOfReplicas());
-        assertEquals(metaData.getNumberOfShards(), deserialized.getNumberOfShards());
-        assertEquals(metaData.getCreationVersion(), deserialized.getCreationVersion());
-        assertEquals(metaData.getRoutingNumShards(), deserialized.getRoutingNumShards());
-        assertEquals(metaData.getCreationDate(), deserialized.getCreationDate());
-        assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor());
-        assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0));
+        try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) {
+            IndexMetaData deserialized = IndexMetaData.readFrom(in);
+            assertEquals(metaData, deserialized);
+            assertEquals(metaData.hashCode(), deserialized.hashCode());
+
+            assertEquals(metaData.getNumberOfReplicas(), deserialized.getNumberOfReplicas());
+            assertEquals(metaData.getNumberOfShards(), deserialized.getNumberOfShards());
+            assertEquals(metaData.getCreationVersion(), deserialized.getCreationVersion());
+            assertEquals(metaData.getRoutingNumShards(), deserialized.getRoutingNumShards());
+            assertEquals(metaData.getCreationDate(), deserialized.getCreationDate());
+            assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor());
+            assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0));
+            assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos());
+        }
     }
 
     public void testGetRoutingFactor() {

From 196ddbff4a3b5f935c6237ebbb9d10d0312a7163 Mon Sep 17 00:00:00 2001
From: Tal Levy
Date: Wed, 20 Jun 2018 08:26:17 -0700
Subject: [PATCH 15/17] backport of: add is-write-index flag to aliases
 (#30942) (#31412)

* add is-write-index flag to aliases (#30942)

This commit adds the is-write-index flag for aliases. It allows requests to
set the flag and responses to display it. It does not yet validate the flag
or affect any indexing, get, or update behavior of Elasticsearch; that will
be done in a follow-up PR.

* [TEST] Double write alias fault (#30942)
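Before the diff itself, a short sketch of the two entry points the commit message describes. `Alias#writeIndex` is added below; the fluent `AliasActions#writeIndex` setter is an assumption here, mirroring the getter exercised in the tests:

[source,java]
----------------------------------------------------------------
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;

class WriteIndexFlagSketch {
    static void examples() {
        // Index creation API: attach the flag to an alias at creation time.
        Alias alias = new Alias("alias1").writeIndex(true);

        // Aliases API: set the flag on an existing index/alias pair.
        IndicesAliasesRequest request = new IndicesAliasesRequest();
        request.addAliasAction(IndicesAliasesRequest.AliasActions.add()
            .index("test").alias("alias1").writeIndex(true));
    }
}
----------------------------------------------------------------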
---
 docs/reference/indices/aliases.asciidoc       |  88 ++++++++++++
 .../test/indices.create/10_basic.yml          |  25 +++-
 .../action/admin/indices/alias/Alias.java     |  34 +++++
 .../indices/alias/IndicesAliasesRequest.java  |  22 +++
 .../alias/IndicesAliasesRequestBuilder.java   |  12 ++
 .../alias/TransportIndicesAliasesAction.java  |   3 +-
 .../rollover/TransportRolloverAction.java     |   2 +-
 .../cluster/metadata/AliasAction.java         |  24 +++-
 .../cluster/metadata/AliasMetaData.java       |  44 +++++-
 .../cluster/metadata/AliasOrIndex.java        |  30 ++++
 .../cluster/metadata/AliasValidator.java      |   4 +-
 .../cluster/metadata/MetaData.java            |  31 ++--
 .../metadata/MetaDataCreateIndexService.java  |   2 +-
 .../metadata/MetaDataIndexAliasesService.java |   2 +-
 .../indices/alias/AliasActionsTests.java      |   3 +
 .../create/CreateIndexRequestTests.java       |   3 +-
 .../indices/shrink/ResizeRequestTests.java    |   3 +-
 .../cluster/metadata/AliasMetaDataTests.java  |  17 +++
 .../metadata/IndexCreationTaskTests.java      |  35 +++++
 .../MetaDataIndexAliasesServiceTests.java     | 134 +++++++++++++++++-
 .../cluster/metadata/MetaDataTests.java       |  35 +++++
 .../metadata/ToAndFromJsonMetaDataTests.java  |  13 +-
 .../SharedClusterSnapshotRestoreIT.java       |   6 +-
 .../index/RandomCreateIndexGenerator.java     |   4 +
 24 files changed, 534 insertions(+), 42 deletions(-)

diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc
index 447873a595bbe..b155cfef302fb 100644
--- a/docs/reference/indices/aliases.asciidoc
+++ b/docs/reference/indices/aliases.asciidoc
@@ -244,6 +244,94 @@ GET /alias2/_search?q=user:kimchy&routing=2,3
 // CONSOLE
 // TEST[continued]
 
+[float]
+[[aliases-write-index]]
+==== Write Index
+
+An alias can designate one of the indices it points to as its write index.
+When a write index is set, all index and update requests against an alias
+that points to multiple indices resolve to that one index. Only one index
+per alias can be the write index at a time. If no write index is specified
+and the alias references multiple indices, writes are rejected.
+
+An index can be marked as the write index for an alias using either the
+aliases API or the index creation API.
+
+[source,js]
+--------------------------------------------------
+POST /_aliases
+{
+    "actions" : [
+        {
+            "add" : {
+                 "index" : "test",
+                 "alias" : "alias1",
+                 "is_write_index" : true
+            }
+        }
+    ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT test\n/]
+
+In this example, `alias1` is added to the `test` index and `test` is marked
+as the write index for the alias.
+
+[source,js]
+--------------------------------------------------
+PUT /alias1/_doc/1
+{
+    "foo": "bar"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The new document that was indexed to `/alias1/_doc/1` is indexed as if it
+had been sent to `/test/_doc/1`:
+
+[source,js]
+--------------------------------------------------
+GET /test/_doc/1
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+To change which index is the write index for an alias, use the aliases API
+to perform an atomic swap. The swap does not depend on the ordering of the
+actions.
+
+[source,js]
+--------------------------------------------------
+POST /_aliases
+{
+    "actions" : [
+        {
+            "add" : {
+                 "index" : "test",
+                 "alias" : "alias1",
+                 "is_write_index" : true
+            }
+        }, {
+            "add" : {
+                 "index" : "test2",
+                 "alias" : "alias1",
+                 "is_write_index" : false
+            }
+        }
+    ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT test\nPUT test2\n/]
+
+[IMPORTANT]
+=====================================
+If an alias references only one index and no index explicitly sets
+`is_write_index: true`, the referenced index behaves as the write index.
+As soon as an additional index is referenced, there is no longer a write
+index and writes are rejected.
+=====================================
 
 [float]
 [[alias-adding]]
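The resolution rule spelled out in the note above is small enough to state as runnable code. This sketch mirrors, but does not copy, `AliasOrIndex.Alias#computeAndValidateWriteIndex` further down in this patch; the `IndexRef` type is a stand-in invented for the example:

[source,java]
----------------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class WriteIndexResolutionSketch {
    static final class IndexRef {
        final String name;
        final Boolean writeIndex; // null means the flag was never set
        IndexRef(String name, Boolean writeIndex) { this.name = name; this.writeIndex = writeIndex; }
    }

    /** Returns the write index name, null if there is none, or throws if ambiguous. */
    static String resolveWriteIndex(String alias, List<IndexRef> refs) {
        List<IndexRef> flagged = refs.stream()
            .filter(r -> Boolean.TRUE.equals(r.writeIndex))
            .collect(Collectors.toList());
        if (refs.size() == 1) {
            return refs.get(0).name;    // a single index is implicitly the write index
        } else if (flagged.size() == 1) {
            return flagged.get(0).name; // exactly one index was flagged explicitly
        } else if (flagged.size() > 1) {
            throw new IllegalStateException("alias [" + alias + "] has more than one write index");
        }
        return null;                    // several indices, none flagged: writes are rejected
    }

    public static void main(String[] args) {
        System.out.println(resolveWriteIndex("alias1", Arrays.asList(
            new IndexRef("test", true), new IndexRef("test2", false)))); // prints "test"
    }
}
----------------------------------------------------------------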
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml
index b0aad6f64bce4..24512b9f31ae1 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml
@@ -82,7 +82,6 @@
       indices.get_alias:
         index: test_index
 
-  - match: {test_index.aliases.test_alias: {}}
   - match: {test_index.aliases.test_blias.search_routing: b}
   - match: {test_index.aliases.test_blias.index_routing: b}
   - is_false: test_index.aliases.test_blias.filter
@@ -90,6 +89,30 @@
   - is_false: test_index.aliases.test_clias.index_routing
   - is_false: test_index.aliases.test_clias.search_routing
 
+---
+"Create index with write aliases":
+  - skip:
+      version: " - 6.99.99"
+      reason: is_write_index is not implemented in ES <= 6.x
+  - do:
+      indices.create:
+        index: test_index
+        body:
+          aliases:
+            test_alias: {}
+            test_blias:
+              is_write_index: false
+            test_clias:
+              is_write_index: true
+
+  - do:
+      indices.get_alias:
+        index: test_index
+
+  - is_false: test_index.aliases.test_alias.is_write_index
+  - is_false: test_index.aliases.test_blias.is_write_index
+  - is_true: test_index.aliases.test_clias.is_write_index
+
 ---
 "Create index with no type mappings":
   - do:
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
index 9172500a8cb50..0f8439643b8f0 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.alias;
 
 import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
@@ -49,6 +50,7 @@ public class Alias implements Streamable, ToXContentFragment {
     private static final ParseField ROUTING = new ParseField("routing");
     private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing");
     private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing");
+    private static final ParseField IS_WRITE_INDEX = new ParseField("is_write_index");
 
     private String name;
 
@@ -61,6 +63,9 @@ public class Alias implements Streamable, ToXContentFragment {
     @Nullable
     private String searchRouting;
 
+    @Nullable
+    private Boolean writeIndex;
+
     private Alias() {
 
     }
@@ -167,6 +172,21 @@ public Alias searchRouting(String searchRouting) {
         return this;
     }
 
+    /**
+     * @return the write index flag for the alias
+     */
+    public Boolean writeIndex() {
+        return writeIndex;
+    }
+
+    /**
+     * Sets whether an alias is pointing to a write-index
+     */
+    public Alias writeIndex(@Nullable Boolean writeIndex) {
+        this.writeIndex = writeIndex;
+        return this;
+    }
+
     /**
      * Allows to read an alias from the provided input stream
      */
@@ -182,6 +202,11 @@ public void readFrom(StreamInput in) throws IOException {
         filter = in.readOptionalString();
         indexRouting = in.readOptionalString();
         searchRouting = in.readOptionalString();
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+            writeIndex = in.readOptionalBoolean();
+        } else {
+            writeIndex = null;
+        }
     }
 
     @Override
@@ -190,6 +215,9 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(filter);
         out.writeOptionalString(indexRouting);
         out.writeOptionalString(searchRouting);
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+            out.writeOptionalBoolean(writeIndex);
+        }
     }
 
     /**
@@ -219,6 +247,10 @@ public static Alias fromXContent(XContentParser parser) throws IOException {
             } else if (SEARCH_ROUTING.match(currentFieldName, parser.getDeprecationHandler())) {
                 alias.searchRouting(parser.text());
             }
+        } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+            if (IS_WRITE_INDEX.match(currentFieldName, parser.getDeprecationHandler())) {
+                alias.writeIndex(parser.booleanValue());
+            }
         }
     }
     return alias;
@@ -245,6 +277,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             }
         }
 
+        builder.field(IS_WRITE_INDEX.getPreferredName(), writeIndex);
+
         builder.endObject();
         return builder;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
index 6332f50c1452e..c7e7288e74f55 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.alias;
 
 import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.AliasesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -84,6 +85,7 @@ public static class AliasActions implements AliasesRequest, Writeable, ToXConten
         private static final ParseField ROUTING = new ParseField("routing");
         private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing");
         private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing");
+        private static final ParseField IS_WRITE_INDEX = new ParseField("is_write_index");
 
         private static final ParseField ADD = new ParseField("add");
         private static final ParseField REMOVE = new ParseField("remove");
@@ -179,6 +181,7 @@ private static ObjectParser parser(String name, Supplier
         REMOVE_PARSER = parser(REMOVE.getPreferredName(), AliasActions::remove);
         private static final ObjectParser REMOVE_INDEX_PARSER = parser(REMOVE_INDEX.getPreferredName(),
@@ -215,6 +218,7 @@ private static ObjectParser parser(String name, Supplier
             actions = unmodifiableList(Arrays.asList(
-                new AliasAction.Add(newIndex, request.getAlias(), null, null, null),
+                new AliasAction.Add(newIndex, request.getAlias(), null, null, null, null),
                 new AliasAction.Remove(oldIndex, request.getAlias())));
         final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(actions)
             .ackTimeout(request.ackTimeout())
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
index ff49d072815fb..436ae79c10319 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
@@ -51,7 +51,7 @@ public String getIndex() {
 
     /**
      * Apply the action.
-     * 
+     *
      * @param aliasValidator call to validate a new alias before adding it to the builder
      * @param metadata metadata builder for the changes made by all actions as part of this request
     * @param index metadata for the index being changed
@@ -64,7 +64,7 @@
      */
     @FunctionalInterface
     public interface NewAliasValidator {
-        void validate(String alias, @Nullable String indexRouting, @Nullable String filter);
+        void validate(String alias, @Nullable String indexRouting, @Nullable String filter, @Nullable Boolean writeIndex);
     }
 
     /**
@@ -82,10 +82,14 @@ public static class Add extends AliasAction {
         @Nullable
         private final String searchRouting;
 
+        @Nullable
+        private final Boolean writeIndex;
+
         /**
          * Build the operation.
         */
-        public Add(String index, String alias, @Nullable String filter, @Nullable String indexRouting, @Nullable String searchRouting) {
+        public Add(String index, String alias, @Nullable String filter, @Nullable String indexRouting,
+                   @Nullable String searchRouting, @Nullable Boolean writeIndex) {
             super(index);
             if (false == Strings.hasText(alias)) {
                 throw new IllegalArgumentException("[alias] is required");
@@ -94,6 +98,7 @@ public Add(String index, String alias, @Nullable String filter, @Nullable String
             this.filter = filter;
             this.indexRouting = indexRouting;
             this.searchRouting = searchRouting;
+            this.writeIndex = writeIndex;
         }
 
         /**
@@ -103,6 +108,10 @@ public String getAlias() {
             return alias;
         }
 
+        public Boolean writeIndex() {
+            return writeIndex;
+        }
+
         @Override
         boolean removeIndex() {
             return false;
@@ -110,15 +119,18 @@ boolean removeIndex() {
 
         @Override
         boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
-            aliasValidator.validate(alias, indexRouting, filter);
+            aliasValidator.validate(alias, indexRouting, filter, writeIndex);
+
             AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).filter(filter).indexRouting(indexRouting)
-                .searchRouting(searchRouting).build();
+                .searchRouting(searchRouting).writeIndex(writeIndex).build();
+
             // Check if this alias already exists
             AliasMetaData currentAliasMd = index.getAliases().get(alias);
             if (currentAliasMd != null && currentAliasMd.equals(newAliasMd)) {
                 // It already exists, ignore it
                 return false;
             }
+
             metadata.put(IndexMetaData.builder(index).putAlias(newAliasMd));
             return true;
         }
@@ -182,4 +194,4 @@ boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, Index
             throw new UnsupportedOperationException();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
index 945c94bcd642d..e17103ee30e7f 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
@@ -20,8 +20,10 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -55,7 +57,10 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> implements To
 
     private final Set<String> searchRoutingValues;
 
-    private AliasMetaData(String alias, CompressedXContent filter, String indexRouting, String searchRouting) {
+    @Nullable
+    private final Boolean writeIndex;
+
+    private AliasMetaData(String alias, CompressedXContent filter, String indexRouting, String searchRouting, Boolean writeIndex) {
         this.alias = alias;
         this.filter = filter;
         this.indexRouting = indexRouting;
@@ -65,10 +70,11 @@ private AliasMetaData(String alias, CompressedXContent filter, String indexRouti
         } else {
             searchRoutingValues = emptySet();
         }
+        this.writeIndex = writeIndex;
     }
 
     private AliasMetaData(AliasMetaData aliasMetaData, String alias) {
-        this(alias, aliasMetaData.filter(), aliasMetaData.indexRouting(), aliasMetaData.searchRouting());
+        this(alias, aliasMetaData.filter(), aliasMetaData.indexRouting(), aliasMetaData.searchRouting(), aliasMetaData.writeIndex());
     }
 
     public String alias() {
@@ -111,6 +117,10 @@ public Set<String> searchRoutingValues() {
         return searchRoutingValues;
     }
 
+    public Boolean writeIndex() {
+        return writeIndex;
+    }
+
     public static Builder builder(String alias) {
         return new Builder(alias);
     }
@@ -138,6 +148,8 @@ public boolean equals(Object o) {
         if (indexRouting != null ? !indexRouting.equals(that.indexRouting) : that.indexRouting != null) return false;
         if (searchRouting != null ? !searchRouting.equals(that.searchRouting) : that.searchRouting != null)
             return false;
+        if (writeIndex != null ? writeIndex != that.writeIndex : that.writeIndex != null)
+            return false;
 
         return true;
     }
@@ -148,6 +160,7 @@ public int hashCode() {
         result = 31 * result + (filter != null ? filter.hashCode() : 0);
         result = 31 * result + (indexRouting != null ? indexRouting.hashCode() : 0);
         result = 31 * result + (searchRouting != null ? searchRouting.hashCode() : 0);
+        result = 31 * result + (writeIndex != null ? writeIndex.hashCode() : 0);
         return result;
     }
 
@@ -173,6 +186,9 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(false);
         }
 
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+            out.writeOptionalBoolean(writeIndex());
+        }
     }
 
     public AliasMetaData(StreamInput in) throws IOException {
@@ -194,6 +210,11 @@ public AliasMetaData(StreamInput in) throws IOException {
             searchRouting = null;
             searchRoutingValues = emptySet();
         }
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+            writeIndex = in.readOptionalBoolean();
+        } else {
+            writeIndex = null;
+        }
     }
 
     public static Diff<AliasMetaData> readDiffFrom(StreamInput in) throws IOException {
@@ -221,6 +242,9 @@ public static class Builder {
 
         private String searchRouting;
 
+        @Nullable
+        private Boolean writeIndex;
+
         public Builder(String alias) {
             this.alias = alias;
 
@@ -231,6 +255,7 @@ public Builder(AliasMetaData aliasMetaData) {
             filter = aliasMetaData.filter();
             indexRouting = aliasMetaData.indexRouting();
             searchRouting = aliasMetaData.searchRouting();
+            writeIndex = aliasMetaData.writeIndex();
         }
 
         public String alias() {
@@ -284,8 +309,13 @@ public Builder searchRouting(String searchRouting) {
             return this;
         }
 
+        public Builder writeIndex(@Nullable Boolean writeIndex) {
+            this.writeIndex = writeIndex;
+            return this;
+        }
+
         public AliasMetaData build() {
-            return new AliasMetaData(alias, filter, indexRouting, searchRouting);
+            return new AliasMetaData(alias, filter, indexRouting, searchRouting, writeIndex);
         }
 
         public static void toXContent(AliasMetaData aliasMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
@@ -307,6 +337,10 @@ public static void toXContent(AliasMetaData aliasMetaData, XContentBuilder build
                 builder.field("search_routing", aliasMetaData.searchRouting());
             }
 
+            if (aliasMetaData.writeIndex() != null) {
+                builder.field("is_write_index", aliasMetaData.writeIndex());
+            }
+
             builder.endObject();
         }
 
@@ -343,6 +377,10 @@ public static AliasMetaData fromXContent(XContentParser parser) throws IOExcepti
                 }
             } else if (token == XContentParser.Token.START_ARRAY) {
                 parser.skipChildren();
+            } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+                if ("is_write_index".equals(currentFieldName)) {
+                    builder.writeIndex(parser.booleanValue());
+                }
             }
         }
         return builder.build();
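One detail of the `AliasMetaData` changes above deserves emphasis: the flag is tri-state. `null` means "never set", and `toXContent` only emits `is_write_index` for a non-null value, so metadata written before this change round-trips unchanged. A small sketch using only the builder methods added above:

[source,java]
----------------------------------------------------------------
import org.elasticsearch.cluster.metadata.AliasMetaData;

class TriStateFlagSketch {
    static void examples() {
        // Explicitly flagged: serialized as "is_write_index": true.
        AliasMetaData flagged = AliasMetaData.builder("alias1").writeIndex(true).build();

        // Never set: writeIndex() stays null and the field is omitted entirely.
        AliasMetaData legacy = AliasMetaData.builder("alias2").build();

        assert Boolean.TRUE.equals(flagged.writeIndex());
        assert legacy.writeIndex() == null;
    }
}
----------------------------------------------------------------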
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java
index 786bd9af78a4c..d8bb04a1a39c3 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java
@@ -19,12 +19,16 @@
 
 package org.elasticsearch.cluster.metadata;
 
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.stream.Collectors;
 
 /**
  * Encapsulates the {@link IndexMetaData} instances of a concrete index or indices an alias is pointing to.
@@ -78,6 +82,7 @@ class Alias implements AliasOrIndex {
 
         private final String aliasName;
         private final List<IndexMetaData> referenceIndexMetaDatas;
+        private SetOnce<IndexMetaData> writeIndex = new SetOnce<>();
 
         public Alias(AliasMetaData aliasMetaData, IndexMetaData indexMetaData) {
             this.aliasName = aliasMetaData.getAlias();
@@ -90,11 +95,21 @@ public boolean isAlias() {
             return true;
         }
 
+        public String getAliasName() {
+            return aliasName;
+        }
+
         @Override
         public List<IndexMetaData> getIndices() {
             return referenceIndexMetaDatas;
         }
 
+        @Nullable
+        public IndexMetaData getWriteIndex() {
+            return writeIndex.get();
+        }
+
         /**
          * Returns the unique alias metadata per concrete index.
         *
@@ -138,5 +153,20 @@ void addIndex(IndexMetaData indexMetaData) {
             this.referenceIndexMetaDatas.add(indexMetaData);
         }
 
+        public void computeAndValidateWriteIndex() {
+            List<IndexMetaData> writeIndices = referenceIndexMetaDatas.stream()
+                .filter(idxMeta -> Boolean.TRUE.equals(idxMeta.getAliases().get(aliasName).writeIndex()))
+                .collect(Collectors.toList());
+            if (referenceIndexMetaDatas.size() == 1) {
+                writeIndex.set(referenceIndexMetaDatas.get(0));
+            } else if (writeIndices.size() == 1) {
+                writeIndex.set(writeIndices.get(0));
+            } else if (writeIndices.size() > 1) {
+                List<String> writeIndicesStrings = writeIndices.stream()
+                    .map(i -> i.getIndex().getName()).collect(Collectors.toList());
+                throw new IllegalStateException("alias [" + aliasName + "] has more than one write index [" +
+                    Strings.collectionToCommaDelimitedString(writeIndicesStrings) + "]");
+            }
+        }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
index 7f1348dd1587f..33e1687e241fa 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
@@ -57,7 +57,7 @@ public AliasValidator(Settings settings) {
      * @throws IllegalArgumentException if the alias is not valid
     */
     public void validateAlias(Alias alias, String index, MetaData metaData) {
-        validateAlias(alias.name(), index, alias.indexRouting(), name -> metaData.index(name));
+        validateAlias(alias.name(), index, alias.indexRouting(), metaData::index);
     }
 
     /**
@@ -66,7 +66,7 @@ public void validateAlias(Alias alias, String index, MetaData metaData) {
      * @throws IllegalArgumentException if the alias is not valid
     */
     public void validateAliasMetaData(AliasMetaData aliasMetaData, String index, MetaData metaData) {
-        validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), name -> metaData.index(name));
+        validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), metaData::index);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 9afbbf95ae14d..ecc6c810ba8ba 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -1046,7 +1046,22 @@ public MetaData build() {
         }
 
-        // build all indices map
+        SortedMap<String, AliasOrIndex> aliasAndIndexLookup = Collections.unmodifiableSortedMap(buildAliasAndIndexLookup());
+
+
+        // build all concrete indices arrays:
+        // TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices.
+        // When doing an operation across all indices, most of the time is spent on actually going to all shards and
+ // When doing an operation across all indices, most of the time is spent on actually going to all shards and + // do the required operations, the bottleneck isn't resolving expressions into concrete indices. + String[] allIndicesArray = allIndices.toArray(new String[allIndices.size()]); + String[] allOpenIndicesArray = allOpenIndices.toArray(new String[allOpenIndices.size()]); + String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]); + + return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(), + customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, aliasAndIndexLookup); + } + + private SortedMap buildAliasAndIndexLookup() { SortedMap aliasAndIndexLookup = new TreeMap<>(); for (ObjectCursor cursor : indices.values()) { IndexMetaData indexMetaData = cursor.value; @@ -1066,17 +1081,9 @@ public MetaData build() { }); } } - aliasAndIndexLookup = Collections.unmodifiableSortedMap(aliasAndIndexLookup); - // build all concrete indices arrays: - // TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices. - // When doing an operation across all indices, most of the time is spent on actually going to all shards and - // do the required operations, the bottleneck isn't resolving expressions into concrete indices. - String[] allIndicesArray = allIndices.toArray(new String[allIndices.size()]); - String[] allOpenIndicesArray = allOpenIndices.toArray(new String[allOpenIndices.size()]); - String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]); - - return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(), - customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, aliasAndIndexLookup); + aliasAndIndexLookup.values().stream().filter(AliasOrIndex::isAlias) + .forEach(alias -> ((AliasOrIndex.Alias) alias).computeAndValidateWriteIndex()); + return aliasAndIndexLookup; } public static String toXContent(MetaData metaData) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 35883cb208031..f63876f836fab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -513,7 +513,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } for (Alias alias : request.aliases()) { AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) - .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); + .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).writeIndex(alias.writeIndex()).build(); indexMetaDataBuilder.putAlias(aliasMetaData); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 64c0afea3d0af..06eb008d20773 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -127,7 +127,7 @@ ClusterState innerExecute(ClusterState currentState, Iterable actio if (index 
         if (index == null) {
             throw new IndexNotFoundException(action.getIndex());
         }
-        NewAliasValidator newAliasValidator = (alias, indexRouting, filter) -> {
+        NewAliasValidator newAliasValidator = (alias, indexRouting, filter, writeIndex) -> {
             /* It is important that we look up the index using the metadata builder we are modifying so we can remove an
              * index and replace it with an alias. */
             Function<String, IndexMetaData> indexLookup = name -> metadata.get(name);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java
index 202530ccf9289..f2ae67e1fc1ab 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java
@@ -114,6 +114,7 @@ public void testParseAdd() throws IOException {
         Map<String, Object> filter = randomBoolean() ? randomMap(5) : null;
         Object searchRouting = randomBoolean() ? randomRouting() : null;
         Object indexRouting = randomBoolean() ? randomBoolean() ? searchRouting : randomRouting() : null;
+        boolean writeIndex = randomBoolean();
         XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
         b.startObject();
         {
@@ -142,6 +143,7 @@ public void testParseAdd() throws IOException {
                 if (indexRouting != null && false == indexRouting.equals(searchRouting)) {
                     b.field("index_routing", indexRouting);
                 }
+                b.field("is_write_index", writeIndex);
             }
             b.endObject();
         }
@@ -159,6 +161,7 @@ public void testParseAdd() throws IOException {
         }
         assertEquals(Objects.toString(searchRouting, null), action.searchRouting());
         assertEquals(Objects.toString(indexRouting, null), action.indexRouting());
+        assertEquals(writeIndex, action.writeIndex());
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java
index 80b191398ada2..194b1cbe8ea8a 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java
@@ -69,6 +69,7 @@ public void testToXContent() throws IOException {
         Alias alias = new Alias("test_alias");
         alias.routing("1");
         alias.filter("{\"term\":{\"year\":2016}}");
+        alias.writeIndex(true);
         request.alias(alias);
 
         Settings.Builder settings = Settings.builder();
@@ -79,7 +80,7 @@ public void testToXContent() throws IOException {
 
         String expectedRequestBody = "{\"settings\":{\"index\":{\"number_of_shards\":\"10\"}}," +
             "\"mappings\":{\"my_type\":{\"type\":{}}}," +
-            "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\"}}}";
+            "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\",\"is_write_index\":true}}}";
 
         assertEquals(expectedRequestBody, actualRequestBody);
     }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java
index ba595de5215a3..4fa99374f0fab 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java
@@ -71,6 +71,7 @@ public void testToXContent() throws IOException {
             Alias alias = new Alias("test_alias");
             alias.routing("1");
             alias.filter("{\"term\":{\"year\":2016}}");
+            alias.writeIndex(true);
             target.alias(alias);
             Settings.Builder settings = Settings.builder();
             settings.put(SETTING_NUMBER_OF_SHARDS, 10);
@@ -78,7 +79,7 @@ public void testToXContent() throws IOException {
             request.setTargetIndex(target);
             String actualRequestBody = Strings.toString(request);
             String expectedRequestBody = "{\"settings\":{\"index\":{\"number_of_shards\":\"10\"}}," +
-                "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\"}}}";
+                "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\",\"is_write_index\":true}}}";
             assertEquals(expectedRequestBody, actualRequestBody);
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java
index 00865cc9a6579..de23c560eb9af 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java
@@ -41,6 +41,7 @@ public void testSerialization() throws IOException {
             .indexRouting("indexRouting")
             .routing("routing")
             .searchRouting("trim,tw , ltw , lw")
+            .writeIndex(randomBoolean() ? null : randomBoolean())
             .build();
 
         assertThat(before.searchRoutingValues(), equalTo(Sets.newHashSet("trim", "tw ", " ltw ", " lw")));
@@ -54,6 +55,21 @@ public void testSerialization() throws IOException {
         assertThat(after, equalTo(before));
     }
 
+    @Override
+    protected void assertEqualInstances(AliasMetaData expectedInstance, AliasMetaData newInstance) {
+        assertNotSame(newInstance, expectedInstance);
+        if (expectedInstance.writeIndex() == null) {
+            expectedInstance = AliasMetaData.builder(expectedInstance.alias())
+                .filter(expectedInstance.filter())
+                .indexRouting(expectedInstance.indexRouting())
+                .searchRouting(expectedInstance.searchRouting())
+                .writeIndex(randomBoolean() ? null : randomBoolean())
+                .build();
+        }
+        assertEquals(expectedInstance, newInstance);
+        assertEquals(expectedInstance.hashCode(), newInstance.hashCode());
+    }
+
     @Override
     protected AliasMetaData createTestInstance() {
         return createTestItem();
@@ -95,6 +111,7 @@ private static AliasMetaData createTestItem() {
         if (randomBoolean()) {
             builder.filter("{\"term\":{\"year\":2016}}");
         }
+        builder.writeIndex(randomBoolean());
         return builder.build();
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
index 40673ba6d29cf..6220027ee4133 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
@@ -70,6 +70,7 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.mock;
@@ -329,6 +330,40 @@ public void testValidateWaitForActiveShardsFailure() throws Exception {
 
         assertThat(e.getMessage(), containsString("invalid wait_for_active_shards"));
     }
null : randomBoolean(); + setupRequestAlias(new Alias("alias1").writeIndex(writeIndex)); + setupRequestMapping("mapping1", createMapping()); + setupRequestCustom("custom1", createCustom()); + reqSettings.put("key1", "value1"); + + final ClusterState result = executeTask(); + assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); + assertThat(result.metaData().index("test").getAliases().get("alias1").writeIndex(), equalTo(writeIndex)); + + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + } + + public void testWriteIndexValidationException() throws Exception { + IndexMetaData existingWriteIndex = IndexMetaData.builder("test2") + .settings(settings(Version.CURRENT)).putAlias(AliasMetaData.builder("alias1").writeIndex(true).build()) + .numberOfShards(1).numberOfReplicas(0).build(); + idxBuilder.put("test2", existingWriteIndex); + setupRequestMapping("mapping1", createMapping()); + setupRequestCustom("custom1", createCustom()); + reqSettings.put("key1", "value1"); + setupRequestAlias(new Alias("alias1").writeIndex(true)); + + Exception exception = expectThrows(IllegalStateException.class, () -> executeTask()); + assertThat(exception.getMessage(), startsWith("alias [alias1] has more than one write index [")); + + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + } + private IndexRoutingTable createIndexRoutingTableWithStartedShards(Index index) { final IndexRoutingTable idxRoutingTable = mock(IndexRoutingTable.class); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java index e5b52d8cf52bf..812dfd8f6f686 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; @@ -29,9 +30,13 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.List; import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anySetOf; import static org.mockito.Mockito.mock; @@ -64,7 +69,7 @@ public void testAddAndRemove() { ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), index); // Add an alias to it - ClusterState after = service.innerExecute(before, singletonList(new AliasAction.Add(index, "test", null, null, null))); + ClusterState after = service.innerExecute(before, singletonList(new AliasAction.Add(index, "test", null, null, null, null))); AliasOrIndex alias = after.metaData().getAliasAndIndexLookup().get("test"); assertNotNull(alias); assertTrue(alias.isAlias()); @@ -74,7 +79,7 @@ public void testAddAndRemove() { 
before = after; after = service.innerExecute(before, Arrays.asList( new AliasAction.Remove(index, "test"), - new AliasAction.Add(index, "test_2", null, null, null))); + new AliasAction.Add(index, "test_2", null, null, null, null))); assertNull(after.metaData().getAliasAndIndexLookup().get("test")); alias = after.metaData().getAliasAndIndexLookup().get("test_2"); assertNotNull(alias); @@ -95,7 +100,7 @@ public void testSwapIndexWithAlias() { // Now remove "test" and add an alias to "test" to "test_2" in one go ClusterState after = service.innerExecute(before, Arrays.asList( - new AliasAction.Add("test_2", "test", null, null, null), + new AliasAction.Add("test_2", "test", null, null, null, null), new AliasAction.RemoveIndex("test"))); AliasOrIndex alias = after.metaData().getAliasAndIndexLookup().get("test"); assertNotNull(alias); @@ -109,7 +114,7 @@ public void testAddAliasToRemovedIndex() { // Attempt to add an alias to "test" at the same time as we remove it IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> service.innerExecute(before, Arrays.asList( - new AliasAction.Add("test", "alias", null, null, null), + new AliasAction.Add("test", "alias", null, null, null, null), new AliasAction.RemoveIndex("test")))); assertEquals("test", e.getIndex().getName()); } @@ -125,6 +130,127 @@ public void testRemoveIndexTwice() { assertNull(after.metaData().getAliasAndIndexLookup().get("test")); } + public void testAddWriteOnlyWithNoExistingAliases() { + ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test"); + + ClusterState after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, false))); + assertFalse(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test"))); + + after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, null))); + assertNull(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test"))); + + after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, true))); + assertTrue(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test"))); + } + + public void testAddWriteOnlyWithExistingWriteIndex() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .putAlias(AliasMetaData.builder("alias").writeIndex(true).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + ClusterState after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, null))); + assertNull(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) 
after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test2"))); + + Exception exception = expectThrows(IllegalStateException.class, () -> service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, true)))); + assertThat(exception.getMessage(), startsWith("alias [alias] has more than one write index [")); + } + + public void testSwapWriteOnlyIndex() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .putAlias(AliasMetaData.builder("alias").writeIndex(true).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + Boolean unsetValue = randomBoolean() ? null : false; + List<AliasAction> swapActions = Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, unsetValue), + new AliasAction.Add("test2", "alias", null, null, null, true) + ); + Collections.shuffle(swapActions, random()); + ClusterState after = service.innerExecute(before, swapActions); + assertThat(after.metaData().index("test").getAliases().get("alias").writeIndex(), equalTo(unsetValue)); + assertTrue(after.metaData().index("test2").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test2"))); + } + + public void testAddWriteOnlyWithExistingNonWriteIndices() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .putAlias(AliasMetaData.builder("alias").writeIndex(randomBoolean() ? null : false).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .putAlias(AliasMetaData.builder("alias").writeIndex(randomBoolean() ?
null : false).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData3 = IndexMetaData.builder("test3") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2).put(indexMetaData3)).build(); + + assertNull(((AliasOrIndex.Alias) before.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex()); + + ClusterState after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test3", "alias", null, null, null, true))); + assertTrue(after.metaData().index("test3").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test3"))); + + } + + public void testAddWriteOnlyWithIndexRemoved() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .putAlias(AliasMetaData.builder("alias").build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .putAlias(AliasMetaData.builder("alias").build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + assertNull(before.metaData().index("test").getAliases().get("alias").writeIndex()); + assertNull(before.metaData().index("test2").getAliases().get("alias").writeIndex()); + assertNull(((AliasOrIndex.Alias) before.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex()); + + ClusterState after = service.innerExecute(before, Collections.singletonList(new AliasAction.RemoveIndex("test"))); + assertNull(after.metaData().index("test2").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test2"))); + } + + public void testAddWriteOnlyValidatesAgainstMetaDataBuilder() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + Exception exception = expectThrows(IllegalStateException.class, () -> service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, true), + new AliasAction.Add("test2", "alias", null, null, null, true) + ))); + assertThat(exception.getMessage(), startsWith("alias [alias] has more than one write index [")); + } + private ClusterState createIndex(ClusterState state, String index) { IndexMetaData indexMetaData = IndexMetaData.builder(index) .settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random()))) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 74d13a2aab046..a221ee568b0cc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -99,6 +99,34 @@ public void testAliasCollidingWithAnExistingIndex() { } } + public void testValidateAliasWriteOnly() { + String alias = randomAlphaOfLength(5); + String indexA = randomAlphaOfLength(6); + String indexB = randomAlphaOfLength(7); + Boolean aWriteIndex = randomBoolean() ? null : randomBoolean(); + Boolean bWriteIndex; + if (Boolean.TRUE.equals(aWriteIndex)) { + bWriteIndex = randomFrom(Boolean.FALSE, null); + } else { + bWriteIndex = randomFrom(Boolean.TRUE, Boolean.FALSE, null); + } + // when only one index/alias pair exist + MetaData metaData = MetaData.builder().put(buildIndexMetaData(indexA, alias, aWriteIndex)).build(); + + // when alias points to two indices, but valid + // one of the following combinations: [(null, null), (null, true), (null, false), (false, false)] + MetaData.builder(metaData).put(buildIndexMetaData(indexB, alias, bWriteIndex)).build(); + + // when too many write indices + Exception exception = expectThrows(IllegalStateException.class, + () -> { + IndexMetaData.Builder metaA = buildIndexMetaData(indexA, alias, true); + IndexMetaData.Builder metaB = buildIndexMetaData(indexB, alias, true); + MetaData.builder().put(metaA).put(metaB).build(); + }); + assertThat(exception.getMessage(), startsWith("alias [" + alias + "] has more than one write index [")); + } + public void testResolveIndexRouting() { IndexMetaData.Builder builder = IndexMetaData.builder("index") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) @@ -450,6 +478,13 @@ public void testFindMappingsWithFilters() throws IOException { } } + private IndexMetaData.Builder buildIndexMetaData(String name, String alias, Boolean writeIndex) { + return IndexMetaData.builder(name) + .settings(settings(Version.CURRENT)).creationDate(randomNonNegativeLong()) + .putAlias(AliasMetaData.builder(alias).writeIndex(writeIndex)) + .numberOfShards(1).numberOfReplicas(0); + } + @SuppressWarnings("unchecked") private static void assertIndexMappingsNoFields(ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings, String index) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java index 3b80d1f6e2cf0..3ac55ec663ca0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java @@ -111,7 +111,7 @@ public void testSimpleJsonFromAndTo() throws IOException { .putMapping("mapping1", MAPPING_SOURCE1) .putMapping("mapping2", MAPPING_SOURCE2) .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1)) - .putAlias(newAliasMetaDataBuilder("alias2")) + .putAlias(newAliasMetaDataBuilder("alias2").writeIndex(randomBoolean() ? null : randomBoolean())) .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2))) .put(IndexTemplateMetaData.builder("foo") .patterns(Collections.singletonList("bar")) @@ -132,7 +132,7 @@ public void testSimpleJsonFromAndTo() throws IOException { .putMapping("mapping1", MAPPING_SOURCE1) .putMapping("mapping2", MAPPING_SOURCE2) .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1)) - .putAlias(newAliasMetaDataBuilder("alias2")) + .putAlias(newAliasMetaDataBuilder("alias3").writeIndex(randomBoolean() ?
null : randomBoolean())) .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2))) .put(IndexTemplateMetaData.builder("foo") .patterns(Collections.singletonList("bar")) @@ -146,7 +146,6 @@ public void testSimpleJsonFromAndTo() throws IOException { .build(); String metaDataSource = MetaData.Builder.toXContent(metaData); -// System.out.println("ToJson: " + metaDataSource); MetaData parsedMetaData = MetaData.Builder.fromXContent(createParser(JsonXContent.jsonXContent, metaDataSource)); @@ -270,6 +269,8 @@ public void testSimpleJsonFromAndTo() throws IOException { assertThat(indexMetaData.getAliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1)); assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2")); assertThat(indexMetaData.getAliases().get("alias2").filter(), nullValue()); + assertThat(indexMetaData.getAliases().get("alias2").writeIndex(), + equalTo(metaData.index("test11").getAliases().get("alias2").writeIndex())); assertThat(indexMetaData.getAliases().get("alias4").alias(), equalTo("alias4")); assertThat(indexMetaData.getAliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2)); @@ -286,8 +287,10 @@ public void testSimpleJsonFromAndTo() throws IOException { assertThat(indexMetaData.getAliases().size(), equalTo(3)); assertThat(indexMetaData.getAliases().get("alias1").alias(), equalTo("alias1")); assertThat(indexMetaData.getAliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1)); - assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2")); - assertThat(indexMetaData.getAliases().get("alias2").filter(), nullValue()); + assertThat(indexMetaData.getAliases().get("alias3").alias(), equalTo("alias3")); + assertThat(indexMetaData.getAliases().get("alias3").filter(), nullValue()); + assertThat(indexMetaData.getAliases().get("alias3").writeIndex(), + equalTo(metaData.index("test12").getAliases().get("alias3").writeIndex())); assertThat(indexMetaData.getAliases().get("alias4").alias(), equalTo("alias4")); assertThat(indexMetaData.getAliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2)); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 69daa2be26feb..c1e0616cb7609 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1518,9 +1518,9 @@ public void testRenameOnRestore() throws Exception { ensureGreen(); assertAcked(client.admin().indices().prepareAliases() - .addAlias("test-idx-1", "alias-1") - .addAlias("test-idx-2", "alias-2") - .addAlias("test-idx-3", "alias-3") + .addAlias("test-idx-1", "alias-1", false) + .addAlias("test-idx-2", "alias-2", false) + .addAlias("test-idx-3", "alias-3", false) ); logger.info("--> indexing some data"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java index 3e42e3b304e00..e88a9f0a38d2c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java @@ -138,6 +138,10 @@ private static Alias randomAlias() { alias.filter("{\"term\":{\"year\":2016}}"); } + if (randomBoolean()) { + alias.writeIndex(randomBoolean()); + } + return alias; } } 
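The write-index tests above all exercise a single invariant introduced by this patch: an alias may designate at most one write index, a `null` flag means "unset", and an alias that points at exactly one index implicitly treats that index as its write index. The following is a minimal, self-contained sketch of that rule; the class and method names are illustrative, not the Elasticsearch implementation, and only the exception message mirrors the one the tests assert on with `startsWith`:

[source,java]
----------------------------------------------------------------
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the write-index invariant; not the actual
// MetaData validation code.
public class WriteIndexInvariant {

    // Flags may be TRUE, FALSE, or null (unset); at most one TRUE is allowed.
    static String resolveWriteIndex(String alias, Map<String, Boolean> writeFlagByIndex) {
        String writeIndex = null;
        for (Map.Entry<String, Boolean> entry : writeFlagByIndex.entrySet()) {
            if (Boolean.TRUE.equals(entry.getValue())) {
                if (writeIndex != null) {
                    // Mirrors the message the tests assert on
                    throw new IllegalStateException("alias [" + alias
                        + "] has more than one write index [" + writeIndex + "," + entry.getKey() + "]");
                }
                writeIndex = entry.getKey();
            }
        }
        // An alias over exactly one index implicitly uses it as the write index.
        if (writeIndex == null && writeFlagByIndex.size() == 1) {
            writeIndex = writeFlagByIndex.keySet().iterator().next();
        }
        return writeIndex;
    }

    public static void main(String[] args) {
        Map<String, Boolean> flags = new LinkedHashMap<>();
        flags.put("test", null);  // unset, like AliasAction.Add(..., null)
        flags.put("test2", true); // explicit write index
        System.out.println(resolveWriteIndex("alias", flags)); // prints: test2

        flags.put("test", true);  // a second write index violates the invariant
        try {
            resolveWriteIndex("alias", flags);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}
----------------------------------------------------------------

Note that `testSwapWriteOnlyIndex` shuffles its two `Add` actions before executing them: as `testAddWriteOnlyValidatesAgainstMetaDataBuilder` shows, the invariant is checked against the resulting metadata, so the order of alias actions within one request does not matter.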
From 1a1164cea74d66ee4a1d6357926ba266618cb2a9 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 20 Jun 2018 08:17:11 -0700 Subject: [PATCH 16/17] [DOCS] Move licensing APIs to docs (#31445) --- .../licensing}/delete-license.asciidoc | 2 ++ .../licensing}/get-basic-status.asciidoc | 1 + .../reference/licensing}/get-license.asciidoc | 9 +++----- .../licensing}/get-trial-status.asciidoc | 1 + docs/reference/licensing/index.asciidoc | 22 +++++++++++++++++++ .../reference/licensing}/start-basic.asciidoc | 1 + .../reference/licensing}/start-trial.asciidoc | 1 + .../licensing}/update-license.asciidoc | 2 ++ docs/reference/rest-api/index.asciidoc | 2 +- x-pack/docs/build.gradle | 2 -- x-pack/docs/en/rest-api/licensing.asciidoc | 22 ------------------- 11 files changed, 34 insertions(+), 31 deletions(-) rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/delete-license.asciidoc (97%) rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/get-basic-status.asciidoc (98%) rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/get-license.asciidoc (85%) rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/get-trial-status.asciidoc (98%) create mode 100644 docs/reference/licensing/index.asciidoc rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/start-basic.asciidoc (99%) rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/start-trial.asciidoc (98%) rename {x-pack/docs/en/rest-api/license => docs/reference/licensing}/update-license.asciidoc (99%) delete mode 100644 x-pack/docs/en/rest-api/licensing.asciidoc diff --git a/x-pack/docs/en/rest-api/license/delete-license.asciidoc b/docs/reference/licensing/delete-license.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/license/delete-license.asciidoc rename to docs/reference/licensing/delete-license.asciidoc index 24662664daa40..b02406045a989 100644 --- a/x-pack/docs/en/rest-api/license/delete-license.asciidoc +++ b/docs/reference/licensing/delete-license.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[delete-license]] === Delete License API @@ -41,3 +42,4 @@ When the license is successfully deleted, the API returns the following response "acknowledged": true } ------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/license/get-basic-status.asciidoc b/docs/reference/licensing/get-basic-status.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/license/get-basic-status.asciidoc rename to docs/reference/licensing/get-basic-status.asciidoc index c6c6385447ab3..a9cc9cf67add6 100644 --- a/x-pack/docs/en/rest-api/license/get-basic-status.asciidoc +++ b/docs/reference/licensing/get-basic-status.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[get-basic-status]] === Get Basic Status API diff --git a/x-pack/docs/en/rest-api/license/get-license.asciidoc b/docs/reference/licensing/get-license.asciidoc similarity index 85% rename from x-pack/docs/en/rest-api/license/get-license.asciidoc rename to docs/reference/licensing/get-license.asciidoc index cba6e71057661..bf094d99f2f5a 100644 --- a/x-pack/docs/en/rest-api/license/get-license.asciidoc +++ b/docs/reference/licensing/get-license.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[get-license]] === Get License API @@ -52,11 +53,9 @@ GET _xpack/license "license" : { "status" : "active", "uid" : "cbff45e7-c553-41f7-ae4f-9205eabd80xx", - "type" : "trial", + "type" : "basic", 
"issue_date" : "2018-02-22T23:12:05.550Z", "issue_date_in_millis" : 1519341125550, - "expiry_date" : "2018-03-24T23:12:05.550Z", - "expiry_date_in_millis" : 1521933125550, "max_nodes" : 1000, "issued_to" : "test", "issuer" : "elasticsearch", @@ -65,11 +64,9 @@ GET _xpack/license } -------------------------------------------------- // TESTRESPONSE[s/"cbff45e7-c553-41f7-ae4f-9205eabd80xx"/$body.license.uid/] -// TESTRESPONSE[s/"trial"/$body.license.type/] +// TESTRESPONSE[s/"basic"/$body.license.type/] // TESTRESPONSE[s/"2018-02-22T23:12:05.550Z"/$body.license.issue_date/] // TESTRESPONSE[s/1519341125550/$body.license.issue_date_in_millis/] -// TESTRESPONSE[s/"2018-03-24T23:12:05.550Z"/$body.license.expiry_date/] -// TESTRESPONSE[s/1521933125550/$body.license.expiry_date_in_millis/] // TESTRESPONSE[s/1000/$body.license.max_nodes/] // TESTRESPONSE[s/"test"/$body.license.issued_to/] // TESTRESPONSE[s/"elasticsearch"/$body.license.issuer/] diff --git a/x-pack/docs/en/rest-api/license/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/license/get-trial-status.asciidoc rename to docs/reference/licensing/get-trial-status.asciidoc index b2cc1ce1b6c88..ec47782a3d2ee 100644 --- a/x-pack/docs/en/rest-api/license/get-trial-status.asciidoc +++ b/docs/reference/licensing/get-trial-status.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[get-trial-status]] === Get Trial Status API diff --git a/docs/reference/licensing/index.asciidoc b/docs/reference/licensing/index.asciidoc new file mode 100644 index 0000000000000..a1dfd398acfe7 --- /dev/null +++ b/docs/reference/licensing/index.asciidoc @@ -0,0 +1,22 @@ +[role="xpack"] +[[licensing-apis]] +== Licensing APIs + +You can use the following APIs to manage your licenses: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + + +include::delete-license.asciidoc[] +include::get-license.asciidoc[] +include::get-trial-status.asciidoc[] +include::start-trial.asciidoc[] +include::get-basic-status.asciidoc[] +include::start-basic.asciidoc[] +include::update-license.asciidoc[] diff --git a/x-pack/docs/en/rest-api/license/start-basic.asciidoc b/docs/reference/licensing/start-basic.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/license/start-basic.asciidoc rename to docs/reference/licensing/start-basic.asciidoc index 820b2b5eab64a..3206dc0801f36 100644 --- a/x-pack/docs/en/rest-api/license/start-basic.asciidoc +++ b/docs/reference/licensing/start-basic.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[start-basic]] === Start Basic API diff --git a/x-pack/docs/en/rest-api/license/start-trial.asciidoc b/docs/reference/licensing/start-trial.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/license/start-trial.asciidoc rename to docs/reference/licensing/start-trial.asciidoc index 341c72853fd08..ba1cc0d786693 100644 --- a/x-pack/docs/en/rest-api/license/start-trial.asciidoc +++ b/docs/reference/licensing/start-trial.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[start-trial]] === Start Trial API diff --git a/x-pack/docs/en/rest-api/license/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/license/update-license.asciidoc rename to docs/reference/licensing/update-license.asciidoc index 54c5539840772..b340cf3ed6ee5 100644 --- a/x-pack/docs/en/rest-api/license/update-license.asciidoc +++ b/docs/reference/licensing/update-license.asciidoc @@ -1,4 +1,5 @@ 
[role="xpack"] +[testenv="basic"] [[update-license]] === Update License API @@ -123,6 +124,7 @@ receive the following response: } } ------------------------------------------------------------ +// NOTCONSOLE To complete the update, you must re-submit the API request and set the `acknowledge` parameter to `true`. For example: diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index b9d3c9db60a6f..e44eea9aa53f4 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -20,7 +20,7 @@ directly to configure and access {xpack} features. include::info.asciidoc[] include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] -include::{xes-repo-dir}/rest-api/licensing.asciidoc[] +include::{es-repo-dir}/licensing/index.asciidoc[] include::{xes-repo-dir}/rest-api/migration.asciidoc[] include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 6061c7bd8fc24..cc072609b7af3 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -47,8 +47,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/watcher/trigger/schedule/weekly.asciidoc', 'en/watcher/trigger/schedule/yearly.asciidoc', 'en/watcher/troubleshooting.asciidoc', - 'en/rest-api/license/delete-license.asciidoc', - 'en/rest-api/license/update-license.asciidoc', 'en/rest-api/ml/delete-snapshot.asciidoc', 'en/rest-api/ml/forecast.asciidoc', 'en/rest-api/ml/get-bucket.asciidoc', diff --git a/x-pack/docs/en/rest-api/licensing.asciidoc b/x-pack/docs/en/rest-api/licensing.asciidoc deleted file mode 100644 index b30590630f7f9..0000000000000 --- a/x-pack/docs/en/rest-api/licensing.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -[role="xpack"] -[[licensing-apis]] -== Licensing APIs - -You can use the following APIs to manage your licenses: - -* <> -* <> -* <> -* <> -* <> -* <> -* <> - - -include::license/delete-license.asciidoc[] -include::license/get-license.asciidoc[] -include::license/get-trial-status.asciidoc[] -include::license/start-trial.asciidoc[] -include::license/get-basic-status.asciidoc[] -include::license/start-basic.asciidoc[] -include::license/update-license.asciidoc[] From 60b4be690f4b0921c3fd07ddd80117b300a50f91 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 20 Jun 2018 16:57:51 +0100 Subject: [PATCH 17/17] [DOCS] Omit shard failures assertion for incompatible responses (#31430) Filter out the assertion for _cat and _xpack/ml/datafeed APIs --- .../doc/RestTestsFromSnippetsTask.groovy | 19 ++++++++++++------- .../doc/RestTestsFromSnippetsTaskTest.groovy | 10 +++++++--- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 15a4f21b17543..4ffda0c9617c6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -27,7 +27,6 @@ import org.gradle.api.tasks.OutputDirectory import java.nio.file.Files import java.nio.file.Path -import java.util.regex.Matcher /** * Generates REST tests for each snippet marked // TEST. 
@@ -100,6 +99,14 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { return snippet.language == 'js' || snippet.curl } + /** + * Certain requests should not have the shard failure check because the + * format of the response is incompatible i.e. it is not a JSON object. + */ + static shouldAddShardFailureCheck(String path) { + return path.startsWith('_cat') == false && path.startsWith('_xpack/ml/datafeeds/') == false + } + /** * Converts Kibana's block quoted strings into standard JSON. These * {@code """} delimited strings can be embedded in CONSOLE and can @@ -308,13 +315,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { * no shard succeeds. But we need to fail the tests on all of these * because they mean invalid syntax or broken queries or something * else that we don't want to teach people to do. The REST test - * framework doesn't allow us to has assertions in the setup - * section so we have to skip it there. We also have to skip _cat - * actions because they don't return json so we can't is_false - * them. That is ok because they don't have this - * partial-success-is-success thing. + * framework doesn't allow us to have assertions in the setup + * section so we have to skip it there. We also omit the assertion + * from APIs that don't return a JSON object */ - if (false == inSetup && false == path.startsWith('_cat')) { + if (false == inSetup && shouldAddShardFailureCheck(path)) { current.println(" - is_false: _shards.failures") } } diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTaskTest.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTaskTest.groovy index d0a7a2825e6f2..b986319492001 100644 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTaskTest.groovy +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTaskTest.groovy @@ -19,9 +19,7 @@ package org.elasticsearch.gradle.doc -import org.elasticsearch.gradle.doc.SnippetsTask.Snippet -import org.gradle.api.InvalidUserDataException - +import static org.elasticsearch.gradle.doc.RestTestsFromSnippetsTask.shouldAddShardFailureCheck import static org.elasticsearch.gradle.doc.RestTestsFromSnippetsTask.replaceBlockQuote class RestTestFromSnippetsTaskTest extends GroovyTestCase { @@ -47,4 +45,10 @@ class RestTestFromSnippetsTaskTest extends GroovyTestCase { assertEquals("\"foo\": \"bort\\n baz\"", replaceBlockQuote("\"foo\": \"\"\"bort\n baz\"\"\"")); } + + void testIsDocWriteRequest() { + assertTrue(shouldAddShardFailureCheck("doc-index/_search")); + assertFalse(shouldAddShardFailureCheck("_cat")) + assertFalse(shouldAddShardFailureCheck("_xpack/ml/datafeeds/datafeed-id/_preview")); + } }
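The net effect of this last patch: the generated REST tests now skip the `- is_false: _shards.failures` assertion for any snippet whose path cannot return a JSON object, since `_cat` APIs return plain text and the datafeed preview API returns a JSON array. Below is a Java transliteration of the Groovy predicate, matching the three cases exercised by `testIsDocWriteRequest`; the class name is illustrative:

[source,java]
----------------------------------------------------------------
// Illustrative Java transliteration of the Groovy shouldAddShardFailureCheck
// added to RestTestsFromSnippetsTask above.
public class ShardFailureCheck {

    // _cat responses are plain text and datafeed previews are JSON arrays,
    // so neither can carry the _shards.failures assertion.
    static boolean shouldAddShardFailureCheck(String path) {
        return path.startsWith("_cat") == false
            && path.startsWith("_xpack/ml/datafeeds/") == false;
    }

    public static void main(String[] args) {
        System.out.println(shouldAddShardFailureCheck("doc-index/_search")); // true
        System.out.println(shouldAddShardFailureCheck("_cat"));              // false
        System.out.println(
            shouldAddShardFailureCheck("_xpack/ml/datafeeds/datafeed-id/_preview")); // false
    }
}
----------------------------------------------------------------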